Dataset schema (column name: type, observed length or value range):

repo_name: string, lengths 7-71
file_path: string, lengths 5-118
context: list
import_statement: string, lengths 45-12.5k
token_num: int64, values 641-99.4k
cropped_code: string, lengths 44-17k
all_code: string, lengths 43-754k
next_line: string, lengths 2-330
gold_snippet_index: int64, values 0-68
created_at: string, length 25 (fixed)
level: string, 9 distinct classes
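The columns describe a repository-level next-line completion setup: each row pairs a source file with a list of retrieved context snippets, its import block, a cropped code prefix, and the single line that should follow that prefix. Below is a minimal sketch of iterating over such rows, assuming the data has been exported locally as JSON Lines with exactly these column names; the file name rows.jsonl is hypothetical.

import json

# Hypothetical local export of the dataset: one JSON object per line,
# using the column names from the schema above.
DATA_PATH = "rows.jsonl"

with open(DATA_PATH) as f:
    for raw in f:
        row = json.loads(raw)

        # Scalar metadata fields.
        print(row["repo_name"], row["file_path"], row["token_num"], row["level"])

        # "context" is a list of retrieved snippets, each carrying an
        # "identifier", a "path", and the snippet text itself;
        # "gold_snippet_index" presumably marks the snippet that matters
        # for predicting the next line.
        gold = row["context"][row["gold_snippet_index"]]
        print(gold["identifier"], gold["path"])

        # The completion target is the line stored in "next_line".
        print(repr(row["next_line"]))
        break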
Example row 1:

repo_name: daveredrum/SceneTex
file_path: models/pipeline/texture_pipeline.py
context:
[ { "identifier": "TextureMesh", "path": "models/modules/meshes.py", "snippet": "class TextureMesh(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.num_instances = 0\n\n self._init_mesh()\n\n def apply_texture_to_mesh(self, mesh, faces, aux, texture_tensor, sampling_mode=\"bilinear\"):\n new_mesh = mesh.clone() # in-place operation - DANGER!!!\n new_mesh.textures = TexturesUV(\n maps=texture_tensor, # B, H, W, C\n faces_uvs=faces.textures_idx[None, ...],\n verts_uvs=aux.verts_uvs[None, ...],\n sampling_mode=sampling_mode,\n # align_corners=False\n )\n\n return new_mesh\n \n def repeat_meshes_as_batch(self, mesh, batch_size):\n return join_meshes_as_batch(\n [mesh for _ in range(batch_size)],\n include_textures=True\n )\n\n def _init_mesh(self):\n cache_dir = self.config.log_dir\n\n self.mesh_dict = init_multiple_meshes_as_scene(\n json.load(open(self.config.scene_config_path)), \n str(cache_dir), \n self.device, \n subdivide_factor=self.config.subdivide_factor,\n return_dict=True\n )\n\n self.mesh, self.texture = self._init_texture(self.mesh_dict)\n\n if self.config.use_background:\n self.background_mesh_dict = init_background(\n self.config.background,\n self.mesh.get_bounding_boxes().cpu().numpy()[0],\n str(cache_dir),\n self.device,\n return_dict=True\n )\n\n self.background_mesh, self.background_texture = self._init_texture(self.background_mesh_dict)\n\n def _init_texture(self, mesh_dict):\n texture = torch.randn((\n 1, \n self.config.latent_texture_size, \n self.config.latent_texture_size, \n self.config.latent_channels\n ), requires_grad=True, device=self.device)\n\n mesh = self.apply_texture_to_mesh(\n mesh_dict[\"mesh\"],\n mesh_dict[\"faces\"],\n mesh_dict[\"aux\"],\n texture\n )\n\n if self.config.texture_type == \"hashgrid\":\n texture = HashGrid(\n 2,\n self.config.hashgrid_config.otype,\n self.config.hashgrid_config.n_levels,\n self.config.hashgrid_config.n_features_per_level,\n self.config.hashgrid_config.log2_hashmap_size,\n self.config.hashgrid_config.base_resolution,\n self.config.hashgrid_config.max_resolution,\n torch.float16 if self.config.hashgrid_config.dtype == \"half\" else torch.float32 # full precision to avoid NaN\n )\n \n elif self.config.texture_type == \"hashgrid_mlp\":\n texture = HashGridMLP(\n 2,\n self.config.hashgrid_config,\n self.config.mlp_config\n )\n\n else:\n texture = torch.randn((\n 1, \n self.config.latent_texture_size, \n self.config.latent_texture_size, \n self.config.latent_channels\n ), requires_grad=True, device=self.device)\n\n mesh = self.apply_texture_to_mesh(\n mesh_dict[\"mesh\"],\n mesh_dict[\"faces\"],\n mesh_dict[\"aux\"],\n texture\n )\n\n return mesh, texture\n \n def sort_rand_gpu(self, pop_size, num_samples):\n \"\"\"Generate a random torch.Tensor (GPU) and sort it to generate indices.\"\"\"\n return torch.argsort(torch.rand(pop_size, device=self.device))[:num_samples]\n\n def build_instance_map(self, studio, cache_dir):\n # build instance masks\n instance_map = build_instance_map(studio, \n cache_dir, cache_dir,\n self.config.dummy_texture_path, \n self.device, self.config.texture_size, self.config.render_size, 500).to(self.device)\n\n assert len(instance_map.shape) == 2, \"instance map should be in shape (W, H)\"\n\n # replace the dummy texture with the instance map\n self.mesh = self.apply_texture_to_mesh(\n self.mesh_dict[\"mesh\"],\n self.mesh_dict[\"faces\"],\n self.mesh_dict[\"aux\"],\n instance_map[None, :, :, None].repeat(1, 1, 1, 
3),\n \"nearest\"\n )\n \n self.instance_map = instance_map\n \n def sample_instance_anchors(self, cache_dir):\n cache_path = Path(cache_dir) / \"anchors.pth\"\n\n if cache_path.exists():\n print(\"=> loading instance anchors from {}...\".format(str(cache_path)))\n self.instance_anchors = torch.load(str(cache_path))\n self.num_instances = self.instance_anchors.shape[0]\n else:\n print(\"=> sampling instance anchors...\")\n instance_labels = torch.unique(self.instance_map)\n assert instance_labels.shape[0] > 1\n instance_labels = instance_labels[instance_labels != 0]\n\n instance_anchors = []\n for instance_id in instance_labels:\n instance_mask = self.instance_map == instance_id\n uv_coords = torch.nonzero(instance_mask) # NumInsTex, 2\n sampled_ids = self.sort_rand_gpu(uv_coords.shape[0], self.config.num_anchors)\n sampled_uv_coords = uv_coords[sampled_ids, :]\n instance_anchors.append(sampled_uv_coords)\n\n instance_anchors = torch.stack(instance_anchors) # M, NumAnchor, 2\n instance_anchors = instance_anchors.float() / self.config.texture_size\n\n assert instance_anchors.min() >= 0 and instance_anchors.max() <= 1\n\n print(\"=> saving anchors to {}\".format(str(cache_path)))\n torch.save(instance_anchors, str(cache_path))\n\n self.instance_anchors = instance_anchors\n self.num_instances = self.instance_anchors.shape[0]" }, { "identifier": "Studio", "path": "models/modules/studio.py", "snippet": "class Studio(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n # render function\n self.render_func = self._init_render_func()\n\n self._init_camera_settings()\n\n def _init_camera_settings(self):\n if self.config.use_sphere_cameras and not self.config.use_blenderproc_cameras: # use random cameras\n\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n \n dist_linspace = np.linspace(\n self.sphere_cameras.dist.min,\n self.sphere_cameras.dist.max,\n 1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,\n )\n elev_linspace = np.linspace(\n self.sphere_cameras.elev.min,\n self.sphere_cameras.elev.max,\n 1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,\n )\n azim_linspace = np.linspace(\n self.sphere_cameras.azim.min,\n self.sphere_cameras.azim.max,\n 1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,\n )\n fov_linspace = np.linspace(\n self.sphere_cameras.fov.min,\n self.sphere_cameras.fov.max,\n 1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,\n )\n at = np.array(self.sphere_cameras.at)\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n dist_list = combinations[:, 0].tolist()\n elev_list = combinations[:, 1].tolist()\n azim_list = combinations[:, 2].tolist()\n\n self.Rs, self.Ts = init_trajectory(dist_list, elev_list, azim_list, at)\n self.fov_list = combinations[:, 3].tolist()\n\n self.num_cameras = len(self.Rs)\n\n print(\"=> using {} spherical cameras for training\".format(self.num_cameras))\n\n elif not self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:\n\n poses = json.load(open(self.config.blenderproc_cameras))\n self.Rs, self.Ts = init_blenderproc_trajectory(poses, self.device)\n\n self.num_cameras = len(self.Rs)\n self.fov_list = [self.config.fov] * self.num_cameras\n\n 
print(\"=> using {} blenderproc cameras for training\".format(self.num_cameras))\n\n elif self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:\n\n # spherical cameras\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n \n dist_linspace = np.linspace(\n self.sphere_cameras.dist.min,\n self.sphere_cameras.dist.max,\n 1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,\n )\n elev_linspace = np.linspace(\n self.sphere_cameras.elev.min,\n self.sphere_cameras.elev.max,\n 1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,\n )\n azim_linspace = np.linspace(\n self.sphere_cameras.azim.min,\n self.sphere_cameras.azim.max,\n 1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,\n )\n fov_linspace = np.linspace(\n self.sphere_cameras.fov.min,\n self.sphere_cameras.fov.max,\n 1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,\n )\n at = np.array(self.sphere_cameras.at)\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n dist_list = combinations[:, 0].tolist()\n elev_list = combinations[:, 1].tolist()\n azim_list = combinations[:, 2].tolist()\n\n sphere_Rs, sphere_Ts = init_trajectory(dist_list, elev_list, azim_list, at)\n sphere_fov_list = combinations[:, 3].tolist()\n\n # blenderproc cameras\n poses = json.load(open(self.config.blenderproc_cameras))\n blenderproc_Rs, blenderproc_Ts = init_blenderproc_trajectory(poses, self.device)\n blenderproc_fov_list = [self.config.fov] * len(blenderproc_Rs)\n \n self.Rs = sphere_Rs + blenderproc_Rs\n self.Ts = sphere_Ts + blenderproc_Ts\n self.fov_list = sphere_fov_list + blenderproc_fov_list\n self.num_cameras = len(self.Rs)\n\n print(\"=> using {} spherical cameras and {} blenderproc cameras for training\".format(len(sphere_Rs), len(blenderproc_Rs)))\n\n # self.sphere_Rs = sphere_Rs\n # self.sphere_Ts = sphere_Ts\n # self.sphere_fov_list = sphere_fov_list\n # self.num_sphere_cameras = len(self.sphere_Rs)\n\n # self.Rs = sphere_Rs + blenderproc_Rs\n # self.Ts = sphere_Ts + blenderproc_Ts\n # self.fov_list = sphere_fov_list + blenderproc_fov_list\n # self.num_cameras = len(self.Rs)\n\n # print(\"=> using {} spherical cameras and {} blenderproc cameras for training\".format(len(sphere_Rs), len(blenderproc_Rs)))\n # print(\"=> using {} cameras before annealing and {} cameras afterwards\".format(self.num_sphere_cameras, self.num_cameras))\n\n else: # use fixed cameras\n raise NotImplementedError\n\n # for inference \n # FIXME only support spherical cameras for now\n # spherical cameras\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n\n dist_linspace = [self.sphere_cameras.dist.min] # always take the min dist from spherical cameras\n elev_linspace = [self.config.elev]\n azim_linspace = np.linspace(\n self.config.azim[0],\n self.config.azim[1],\n self.config.log_latents_views,\n )\n fov_linspace = [self.config.fov]\n at = np.array(self.sphere_cameras.at) # always take the cameras center from spherical cameras\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n self.inference_dist_list = combinations[:, 0].tolist()\n self.inference_elev_list = combinations[:, 1].tolist()\n self.inference_azim_list = combinations[:, 2].tolist()\n self.inference_fov_list = 
combinations[:, 3].tolist()\n self.inference_at = at\n\n self.num_inference_cameras = len(self.inference_dist_list)\n\n print(\"=> using {} cameras for training, {} cameras for inference.\".format(self.num_cameras, self.num_inference_cameras))\n\n def _init_render_func(self):\n if self.config.render_func_type == \"mlp\":\n if self.config.texture_type == \"hashgrid\":\n in_channels = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level\n elif self.config.texture_type == \"hashgrid_mlp\":\n in_channels = self.config.mlp_config.out_channels\n else:\n in_channels = self.config.latent_channels\n\n render_func = MLP(\n in_channels,\n self.config.render_channels,\n self.config.view_embedding_hidden_dim,\n self.config.num_view_embedding_layers,\n dtype=torch.float32\n ).to(self.device)\n \n elif self.config.render_func_type == \"none\":\n render_func = nn.Identity()\n\n else:\n raise NotImplementedError(\"not supported render function type: {}\".format(self.config.render_func_type))\n\n return render_func\n \n def init_anchor_func(self, num_instances):\n if self.config.texture_type == \"hashgrid\":\n anchor_dim = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level\n elif self.config.texture_type == \"hashgrid_mlp\":\n anchor_dim = self.config.mlp_config.out_channels\n else:\n anchor_dim = self.config.latent_channels\n\n anchor_func = AnchorTransformer(self.config, self.device, anchor_dim=anchor_dim, num_instances=num_instances).to(self.device)\n\n self.anchor_func = anchor_func\n\n def set_cameras(self, R, T, fov, image_size):\n return init_camera_R_T(R, T, image_size, self.device, fov)\n \n def set_renderer(self, camera, image_size):\n return init_renderer(camera,\n shader=init_flat_texel_shader(\n camera=camera,\n device=self.device\n ),\n image_size=image_size, \n faces_per_pixel=self.config.faces_per_pixel\n )\n\n def _sample_one_camera(self, step, random_cameras=False, inference=False):\n R, T, fov, idx = None, None, None, None\n if inference:\n idx = step % self.num_inference_cameras\n dist = self.inference_dist_list[idx]\n elev = self.inference_elev_list[idx]\n azim = self.inference_azim_list[idx]\n fov = self.inference_fov_list[idx]\n at = self.inference_at\n R, T = look_at_view_transform(dist, elev, azim, at=at)\n else:\n\n if random_cameras:\n idx = random.choice(range(self.num_cameras))\n else:\n idx = step % self.num_cameras\n\n R, T, fov = self.Rs[idx], self.Ts[idx], self.fov_list[idx]\n\n # if self.config.use_sphere_cameras and self.config.use_blenderproc_cameras and step < self.config.num_anneal_steps:\n \n # if random_cameras:\n # idx = random.choice(range(self.num_sphere_cameras))\n # else:\n # idx = step % self.num_sphere_cameras\n\n # R, T, fov = self.sphere_Rs[idx], self.sphere_Ts[idx], self.sphere_fov_list[idx]\n\n # else:\n\n # if random_cameras:\n # idx = random.choice(range(self.num_cameras))\n # else:\n # idx = step % self.num_cameras\n\n # R, T, fov = self.Rs[idx], self.Ts[idx], self.fov_list[idx]\n\n return R, T, fov, idx\n \n def sample_cameras(self, step, num_samples, random_cameras=False, inference=False):\n if num_samples == 1:\n return self._sample_one_camera(step, random_cameras, inference)\n else:\n Rs, Ts, fovs, ids = [], [], [], []\n cur_step = step % self.num_cameras\n \n if random_cameras:\n pool = [e for e in range(self.num_cameras) if e != cur_step]\n next_steps = random.sample(pool, k=num_samples-1)\n else:\n next_steps = [(cur_step+s+1) % self.num_cameras for s in 
range(num_samples-1)]\n\n steps = [cur_step] + next_steps\n for s in steps:\n R, T, fov, idx = self._sample_one_camera(s)\n Rs.append(R)\n Ts.append(T)\n fovs.append(fov)\n ids.append(idx)\n\n Rs = torch.cat(Rs, dim=0)\n Ts = torch.cat(Ts, dim=0)\n\n return Rs, Ts, fovs, ids\n\n def get_uv_coordinates(self, mesh, fragments):\n xyzs = mesh.verts_padded() # (N, V, 3)\n faces = mesh.faces_padded() # (N, F, 3)\n\n faces_uvs = mesh.textures.faces_uvs_padded()\n verts_uvs = mesh.textures.verts_uvs_padded()\n\n # NOTE Meshes are replicated in batch. Taking the first one is enough.\n batch_size, _, _ = xyzs.shape\n xyzs, faces, faces_uvs, verts_uvs = xyzs[0], faces[0], faces_uvs[0], verts_uvs[0]\n faces_coords = verts_uvs[faces_uvs] # (F, 3, 2)\n\n # replicate the coordinates as batch\n faces_coords = faces_coords.repeat(batch_size, 1, 1)\n\n invalid_mask = fragments.pix_to_face == -1\n target_coords = interpolate_face_attributes(\n fragments.pix_to_face, fragments.bary_coords, faces_coords\n ) # (N, H, W, 1, 3)\n _, H, W, K, _ = target_coords.shape\n target_coords[invalid_mask] = 0\n assert K == 1 # pixel_per_faces should be 1\n target_coords = target_coords.squeeze(3) # (N, H, W, 2)\n\n return target_coords\n\n def get_relative_depth_map(self, zbuf, pad_value=10):\n absolute_depth = zbuf[..., 0] # B, H, W\n no_depth = -1\n\n depth_min, depth_max = absolute_depth[absolute_depth != no_depth].min(), absolute_depth[absolute_depth != no_depth].max()\n target_min, target_max = 50, 255\n\n depth_value = absolute_depth[absolute_depth != no_depth]\n depth_value = depth_max - depth_value # reverse values\n\n depth_value /= (depth_max - depth_min)\n depth_value = depth_value * (target_max - target_min) + target_min\n\n relative_depth = absolute_depth.clone()\n relative_depth[absolute_depth != no_depth] = depth_value\n relative_depth[absolute_depth == no_depth] = pad_value # not completely black\n\n return absolute_depth, relative_depth\n\n def query_texture(self, coords, texture, encode=True):\n assert \"hashgrid\" in self.config.texture_type\n\n if encode:\n B, H, W, C = coords.shape\n inputs = coords.reshape(-1, C)\n outputs = texture(inputs)\n outputs = outputs.reshape(B, H, W, -1)\n else:\n outputs = coords\n\n return outputs.to(torch.float32)\n \n def query_anchor_features(self, anchors, texture, features, instances_in_view, is_background=False):\n if is_background:\n anchor_features = features\n else:\n # with torch.no_grad():\n # anchors = self.query_texture(anchors.unsqueeze(2), texture).squeeze(2) # M, NumAnchor, C\n # if self.config.detach_anchors:\n # anchors = anchors.detach() # the original UV features won't be updated\n\n anchors = self.query_texture(anchors.unsqueeze(2), texture).squeeze(2) # M, NumAnchor, C\n if self.config.detach_anchors:\n anchors = anchors.detach() # the original UV features won't be updated\n \n anchor_features = self.anchor_func(anchors, features, instances_in_view) # M, C\n\n return anchor_features\n\n def render_features(self, renderer, mesh, texture, is_direct=False, is_background=False, anchors=None):\n # if enable_anchor_embedding is True\n # latents will be the rendered instance map\n latents, fragments = renderer(mesh) # image: (N, H, W, C)\n\n if is_direct:\n features = latents\n else:\n uv_coords = self.get_uv_coordinates(mesh, fragments)\n features = self.query_texture(uv_coords, texture)\n\n if self.config.enable_anchor_embedding:\n features = self.query_anchor_features(anchors, texture, features, latents[..., 0], is_background)\n\n features = 
self.render_func(features)\n\n absolute_depth, relative_depth = self.get_relative_depth_map(fragments.zbuf)\n\n return features, fragments, absolute_depth, relative_depth # (N, H, W, C)\n \n def render(self, renderer, mesh, texture, background=None, background_texture=None, anchors=None, is_direct=False):\n features, fragments, absolute_depth, relative_depth = self.render_features(renderer, mesh, texture, is_direct=is_direct, is_background=False, anchors=anchors)\n\n # blend background\n # NOTE there's no need to render background if no views see the background\n if background is not None and -1 in fragments.zbuf:\n background_features, background_fragments, _, _ = self.render_features(renderer, background, background_texture, is_direct=is_direct, is_background=True, anchors=None)\n\n # blend rendering\n background_mask = fragments.zbuf == -1\n background_mask = background_mask.repeat(1, 1, 1, background_features.shape[-1])\n features[background_mask] = background_features[background_mask]\n\n # blend depth\n background_mask = fragments.zbuf == -1\n blend_zbuf = fragments.zbuf\n blend_zbuf[background_mask] = background_fragments.zbuf[background_mask]\n absolute_depth, relative_depth = self.get_relative_depth_map(blend_zbuf)\n\n return features, absolute_depth, relative_depth" }, { "identifier": "Guidance", "path": "models/modules/guidance.py", "snippet": "class Guidance(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.prompt = config.prompt + \", \" + config.a_prompt if config.a_prompt else config.prompt\n self.n_prompt = config.n_prompt\n \n self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32\n\n self._init_guidance()\n\n def _init_guidance(self):\n self._init_backbone()\n self._init_t_schedule()\n\n def _init_backbone(self):\n if self.config.diffusion_type == \"t2i\":\n from diffusers import StableDiffusionPipeline as DiffusionPipeline\n checkpoint_name = \"stabilityai/stable-diffusion-2-1-base\"\n # diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n # checkpoint_name = \"runwayml/stable-diffusion-v1-5\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n elif self.config.diffusion_type == \"d2i\":\n from diffusers import StableDiffusionDepth2ImgPipeline as DiffusionPipeline\n checkpoint_name = \"stabilityai/stable-diffusion-2-depth\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n elif self.config.diffusion_type == \"d2i_controlnet\":\n from diffusers import StableDiffusionControlNetPipeline as DiffusionPipeline\n controlnet_name = \"lllyasviel/control_v11f1p_sd15_depth\"\n controlnet = ControlNetModel.from_pretrained(controlnet_name)\n checkpoint_name = \"runwayml/stable-diffusion-v1-5\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name, controlnet=controlnet).to(self.device)\n\n # freeze controlnet\n self.controlnet = diffusion_model.controlnet.to(self.weights_dtype)\n self.controlnet.requires_grad_(False)\n else:\n raise ValueError(\"invalid diffusion type.\")\n\n if self.config.enable_memory_efficient_attention:\n print(\"=> Enable memory efficient attention.\")\n diffusion_model.enable_xformers_memory_efficient_attention()\n\n # pretrained diffusion model\n self.tokenizer = diffusion_model.tokenizer\n self.text_encoder = diffusion_model.text_encoder\n self.vae = diffusion_model.vae\n self.unet = 
diffusion_model.unet.to(self.weights_dtype)\n\n self.text_encoder.requires_grad_(False)\n self.vae.requires_grad_(False)\n self.unet.requires_grad_(False)\n\n # use DDIMScheduler by default\n self.scheduler = DDIMScheduler.from_pretrained(checkpoint_name, subfolder=\"scheduler\")\n self.scheduler.betas = self.scheduler.betas.to(self.device)\n self.scheduler.alphas = self.scheduler.alphas.to(self.device)\n self.scheduler.alphas_cumprod = self.scheduler.alphas_cumprod.to(self.device)\n\n self.num_train_timesteps = len(self.scheduler.betas)\n\n if self.config.generation_mode == \"t2i\":\n self.scheduler.set_timesteps(self.config.num_steps)\n raise NotImplementedError\n else:\n self.scheduler.set_timesteps(self.num_train_timesteps)\n\n # phi\n # unet_phi is the same instance as unet that has been modified in-place\n # unet_phi not grad -> only train unet_phi_layers\n if self.config.loss_type == \"vsd\":\n self.unet_phi, self.unet_phi_layers = extract_lora_diffusers(self.unet, self.device)\n\n # load pretrained lora\n if len(self.config.load_lora_weights) > 0 and os.path.exists(self.config.load_lora_weights):\n print(\"=> loading pretrained LoRA weights from: {}\".format(self.config.load_lora_weights))\n self.unet_phi.load_attn_procs(self.config.load_lora_weights)\n\n # loss weights\n self.loss_weights = self._init_loss_weights(self.scheduler.betas)\n\n self.avg_loss_vsd = []\n self.avg_loss_phi = []\n self.avg_loss_rgb = []\n\n if self.config.loss_type == \"l2\": \n self.label = torchvision.io.read_image(self.config.label_path).float().to(self.device) / 255.\n self.label = self.label * 2 - 1 # -1 to 1\n self.label = self.label.unsqueeze(0)\n\n max_memory_allocated = torch.cuda.max_memory_allocated()\n print(f\"=> Maximum GPU memory allocated by PyTorch: {max_memory_allocated / 1024**3:.2f} GB\")\n\n def _init_loss_weights(self, betas): \n num_train_timesteps = len(betas)\n betas = torch.tensor(betas).to(torch.float32) if not torch.is_tensor(betas) else betas\n alphas = 1.0 - betas\n alphas_cumprod = torch.cumprod(alphas, axis=0)\n sqrt_1m_alphas_cumprod = torch.sqrt(1. 
- alphas_cumprod)\n \n weights = []\n for i in range(num_train_timesteps):\n weights.append(sqrt_1m_alphas_cumprod[i]**2)\n \n return weights\n \n def _init_t_schedule(self, t_start=0.02, t_end=0.98):\n # Create a list of time steps from 0 to num_train_timesteps\n ts = list(range(self.num_train_timesteps))\n # set ts to U[0.02,0.98] as least\n t_start = int(t_start * self.num_train_timesteps)\n t_end = int(t_end * self.num_train_timesteps)\n ts = ts[t_start:t_end]\n\n # If the scheduling strategy is \"random\", choose args.num_steps random time steps without replacement\n if self.config.t_schedule == \"random\":\n chosen_ts = np.random.choice(ts, self.config.num_steps, replace=True)\n\n # If the scheduling strategy is \"t_stages\", the total number of time steps are divided into several stages.\n # In each stage, a decreasing portion of the total time steps is considered for selection.\n # For each stage, time steps are randomly selected with replacement from the respective portion.\n # The final list of chosen time steps is a concatenation of the time steps selected in all stages.\n # Note: The total number of time steps should be evenly divisible by the number of stages.\n elif \"t_stages\" in self.config.t_schedule:\n # Parse the number of stages from the scheduling strategy string\n num_stages = int(self.config.t_schedule[8:]) if len(self.config.t_schedule[8:]) > 0 else 2\n chosen_ts = []\n for i in range(num_stages):\n # Define the portion of ts to be considered in this stage\n portion = ts[:int((num_stages-i)*len(ts)//num_stages)]\n selected_ts = np.random.choice(portion, self.config.num_steps//num_stages, replace=True).tolist()\n chosen_ts += selected_ts\n \n elif \"anneal\" in self.config.t_schedule:\n print(\"=> time step annealing after {} steps\".format(self.config.num_anneal_steps))\n\n ts_before_anneal = np.random.choice(ts, self.config.num_anneal_steps, replace=True).tolist()\n ts_after_anneal = np.random.choice(ts[:len(ts)//2], self.config.num_steps-self.config.num_anneal_steps, replace=True).tolist()\n chosen_ts = ts_before_anneal + ts_after_anneal\n \n else:\n raise ValueError(f\"Unknown scheduling strategy: {self.config.t_schedule}\")\n\n # Return the list of chosen time steps\n self.chosen_ts = chosen_ts\n\n def init_text_embeddings(self, batch_size):\n ### get text embedding\n text_input = self.tokenizer(\n [self.prompt], \n padding=\"max_length\", \n max_length=self.tokenizer.model_max_length, \n truncation=True, \n return_tensors=\"pt\"\n ).input_ids.to(self.device)\n\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input)[0].repeat(batch_size, 1, 1)\n\n max_length = text_input.shape[-1]\n uncond_input = self.tokenizer(\n [self.n_prompt], \n padding=\"max_length\", \n max_length=max_length, \n return_tensors=\"pt\"\n ).input_ids.to(self.device)\n\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input)[0].repeat(batch_size, 1, 1)\n\n self.text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n def prepare_depth_map(self, depth_map):\n assert len(depth_map.shape) == 4\n if \"controlnet\" in self.config.diffusion_type:\n depth_map = depth_map.repeat(1, 3, 1, 1).float()\n depth_map = F.interpolate(depth_map, (self.config.render_size, self.config.render_size), mode=\"bilinear\", align_corners=False)\n \n # expected range [0,1]\n depth_map /= 255.0\n else:\n # down-sample and normalize\n depth_map = F.interpolate(depth_map, (self.config.latent_size, self.config.latent_size), mode=\"bilinear\", align_corners=False)\n\n # 
expected range [-1,1]\n depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0\n # depth_map /= 255.0\n # depth_map = 2.0 * depth_map - 1.0\n\n depth_map = depth_map.to(torch.float32)\n\n return depth_map\n \n @torch.no_grad()\n def decode_latent_texture(self, inputs, use_patches=False):\n outputs = 1 / self.vae.config.scaling_factor * inputs\n\n if use_patches:\n assert self.config.latent_texture_size % self.config.decode_texture_size == 0\n batch_size = inputs.shape[0]\n num_iter_x = self.config.latent_texture_size // self.config.decode_texture_size\n num_iter_y = self.config.latent_texture_size // self.config.decode_texture_size\n patch_stride = self.config.decode_texture_size\n decoded_stride = self.config.decode_texture_size * 8\n decoded_size = self.config.latent_texture_size * 8\n decoded_texture = torch.zeros(batch_size, 3, decoded_size, decoded_size).to(self.device)\n\n for x in range(num_iter_x):\n for y in range(num_iter_y):\n patch = outputs[:, :, x*patch_stride:(x+1)*patch_stride, y*patch_stride:(y+1)*patch_stride]\n patch = self.vae.decode(patch.contiguous()).sample # B, 3, H, W\n\n decoded_texture[:, :, x*decoded_stride:(x+1)*decoded_stride, y*decoded_stride:(y+1)*decoded_stride] = patch\n \n outputs = (decoded_texture / 2 + 0.5).clamp(0, 1)\n\n else:\n outputs = self.vae.decode(outputs.contiguous()).sample # B, 3, H, W\n outputs = (outputs / 2 + 0.5).clamp(0, 1)\n\n return outputs\n \n def encode_latent_texture(self, inputs, deterministic=False):\n inputs = inputs.clamp(-1, 1)\n \n h = self.vae.encoder(inputs)\n moments = self.vae.quant_conv(h)\n mean, logvar = torch.chunk(moments, 2, dim=1)\n std = torch.zeros_like(mean) if deterministic else torch.exp(0.5 * logvar)\n sample = mean + std * torch.randn_like(mean)\n \n return self.vae.config.scaling_factor * sample\n\n def normalize_latent_texture(self, inputs):\n outputs = (inputs / 2 + 0.5).clamp(0, 1)\n\n return outputs\n \n def prepare_one_latent(self, latents, t):\n noise = torch.randn_like(latents).to(self.device)\n noisy_latents = self.scheduler.add_noise(latents, noise, t)\n clean_latents = self.scheduler.step(noise, t, noisy_latents).pred_original_sample\n\n return noise, noisy_latents, clean_latents\n\n def prepare_latents(self, latents, t, batch_size):\n t = torch.tensor([t]).to(self.device)\n noise, noisy_latents, clean_latents = self.prepare_one_latent(latents, t)\n\n return t, noise, noisy_latents, clean_latents\n \n def predict_noise(self, unet, noisy_latents, t, cross_attention_kwargs, guidance_scale, control=None):\n down_block_res_samples, mid_block_res_sample = None, None\n\n if guidance_scale == 1:\n latent_model_input = noisy_latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n \n text_embeddings, _ = self.text_embeddings.chunk(2)\n\n if control is not None: \n if \"controlnet\" in self.config.diffusion_type:\n with torch.no_grad():\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n latent_model_input.to(self.weights_dtype),\n t,\n encoder_hidden_states=text_embeddings.to(self.weights_dtype),\n controlnet_cond=control.to(self.weights_dtype),\n conditioning_scale=1.0,\n guess_mode=False,\n return_dict=False,\n )\n\n down_block_res_samples = [e.to(self.weights_dtype) for e in down_block_res_samples]\n mid_block_res_sample = mid_block_res_sample.to(self.weights_dtype)\n else:\n latent_model_input = 
torch.cat([latent_model_input, control], dim=1)\n\n # if self.config.verbose_mode: start = time.time()\n noise_pred = unet(\n latent_model_input.to(self.weights_dtype), \n t, \n encoder_hidden_states=text_embeddings.to(self.weights_dtype), \n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample\n ).sample.to(torch.float32)\n # if self.config.verbose_mode: print(\"=> UNet forward: {}s\".format(time.time() - start))\n else:\n latent_model_input = torch.cat([noisy_latents] * 2)\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n \n if control is not None: \n if \"controlnet\" in self.config.diffusion_type:\n with torch.no_grad():\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n latent_model_input.to(self.weights_dtype),\n t,\n encoder_hidden_states=self.text_embeddings.to(self.weights_dtype),\n controlnet_cond=torch.cat([control]*2).to(self.weights_dtype),\n conditioning_scale=1.0,\n guess_mode=False,\n return_dict=False,\n )\n\n down_block_res_samples = [e.to(self.weights_dtype) for e in down_block_res_samples]\n mid_block_res_sample = mid_block_res_sample.to(self.weights_dtype)\n else:\n latent_model_input = torch.cat([latent_model_input, torch.cat([control]*2)], dim=1)\n\n # if self.config.verbose_mode: start = time.time()\n noise_pred = unet(\n latent_model_input.to(self.weights_dtype), \n t, \n encoder_hidden_states=self.text_embeddings.to(self.weights_dtype), \n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample\n ).sample.to(torch.float32)\n # if self.config.verbose_mode: print(\"=> UNet forward: {}s\".format(time.time() - start))\n\n # perform guidance\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n return noise_pred\n\n def compute_sds_loss(self, latents, noisy_latents, noise, t, control=None):\n with torch.no_grad():\n noise_pred = self.predict_noise(\n self.unet, \n noisy_latents, \n t, \n cross_attention_kwargs={},\n guidance_scale=self.config.guidance_scale,\n control=control\n )\n\n grad = self.config.grad_scale * (noise_pred - noise)\n grad = torch.nan_to_num(grad)\n\n grad *= self.loss_weights[int(t)]\n \n # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad\n target = (latents - grad).detach()\n loss = 0.5 * F.mse_loss(latents, target, reduction=\"mean\")\n\n return loss\n \n def compute_vsd_loss(self, latents, noisy_latents, noise, t, cross_attention_kwargs, control=None): \n with torch.no_grad():\n # predict the noise residual with unet\n # set cross_attention_kwargs={\"scale\": 0} to use the pre-trained model\n if self.config.verbose_mode: start = time.time()\n noise_pred = self.predict_noise(\n self.unet, \n noisy_latents, \n t, \n cross_attention_kwargs={\"scale\": 0},\n guidance_scale=self.config.guidance_scale,\n control=control\n )\n if self.config.verbose_mode: print(\"=> VSD pretrained forward: {}s\".format(time.time() - start))\n\n if self.config.verbose_mode: start = time.time()\n noise_pred_phi = self.predict_noise(\n self.unet_phi, \n noisy_latents, \n t, \n cross_attention_kwargs=cross_attention_kwargs,\n guidance_scale=self.config.guidance_scale_phi,\n control=control\n )\n if self.config.verbose_mode: print(\"=> VSD lora forward: {}s\".format(time.time() - start))\n\n grad = 
self.config.grad_scale * (noise_pred - noise_pred_phi.detach())\n grad = torch.nan_to_num(grad)\n\n grad *= self.loss_weights[int(t)]\n \n # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad\n target = (latents - grad).detach()\n loss = 0.5 * F.mse_loss(latents, target, reduction=\"none\")\n\n return loss, loss.mean()\n \n def compute_vsd_phi_loss(self, noisy_latents, clean_latents, noise, t, cross_attention_kwargs, control=None):\n if self.config.verbose_mode: start = time.time()\n noise_pred_phi = self.predict_noise(\n self.unet_phi, \n noisy_latents, \n t, \n cross_attention_kwargs=cross_attention_kwargs,\n guidance_scale=self.config.guidance_scale_phi,\n control=control\n )\n\n if self.config.verbose_mode: print(\"=> phi lora forward: {}s\".format(time.time() - start))\n\n target = noise\n\n loss = self.config.grad_scale * F.mse_loss(noise_pred_phi, target, reduction=\"none\")\n\n return loss, loss.mean()" } ]
import_statement:

import random
import wandb
import json
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
import pytorch_lightning as pl
import matplotlib.pyplot as plt
import sys
import open_clip

from torch.optim import Adam, AdamW
from torch.optim.lr_scheduler import LinearLR
from omegaconf import OmegaConf
from tqdm import tqdm
from omegaconf import OmegaConf
from PIL import Image
from copy import deepcopy
from pathlib import Path

from pytorch3d.io import (
    load_obj,
    load_objs_as_meshes
)
from pytorch3d.renderer import TexturesUV
from pytorch3d.ops import interpolate_face_attributes

from models.modules import TextureMesh, Studio, Guidance
token_num: 10,818
cropped_code:

# mat

# customized
sys.path.append("./lib")

class TexturePipeline(nn.Module):
    def __init__(self,
        config,
        stamp,
        device
    ):
        super().__init__()

        self.config = config
        self.stamp = stamp

        self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt
        self.n_prompt = config.n_prompt

        self.device = device

        self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32
        print("=> Use precision: {}".format(self.weights_dtype))

        pl.seed_everything(self.config.seed)

    """call this after to(device)"""
    def configure(self, inference_mode=False):
        if not inference_mode:
            self.log_name = "_".join(self.config.prompt.split(' '))
            self.log_stamp = self.stamp
            self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp)

            # override config
            self.config.log_name = self.log_name
            self.config.log_stamp = self.log_stamp
            self.config.log_dir = self.log_dir

        # 3D assets
        self._init_mesh()

        # studio
        self._init_studio()

        # instances
        self._init_anchors()

        if not inference_mode:
            # diffusion
            self._init_guidance()

            # optimization
            self._configure_optimizers()

            self._init_logger()

            if self.config.enable_clip_benchmark:
                self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
                self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32')

    def _init_studio(self):
all_code:

# mat

# customized
sys.path.append("./lib")

class TexturePipeline(nn.Module):
    def __init__(self,
        config,
        stamp,
        device
    ):
        super().__init__()

        self.config = config
        self.stamp = stamp

        self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt
        self.n_prompt = config.n_prompt

        self.device = device

        self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32
        print("=> Use precision: {}".format(self.weights_dtype))

        pl.seed_everything(self.config.seed)

    """call this after to(device)"""
    def configure(self, inference_mode=False):
        if not inference_mode:
            self.log_name = "_".join(self.config.prompt.split(' '))
            self.log_stamp = self.stamp
            self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp)

            # override config
            self.config.log_name = self.log_name
            self.config.log_stamp = self.log_stamp
            self.config.log_dir = self.log_dir

        # 3D assets
        self._init_mesh()

        # studio
        self._init_studio()

        # instances
        self._init_anchors()

        if not inference_mode:
            # diffusion
            self._init_guidance()

            # optimization
            self._configure_optimizers()

            self._init_logger()

            if self.config.enable_clip_benchmark:
                self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
                self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32')

    def _init_studio(self):
next_line: self.studio = Studio(self.config, self.device)
gold_snippet_index: 1
created_at: 2023-11-28 15:38:40+00:00
level: 16k
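In this first row, cropped_code ends at the "def _init_studio(self):" header, next_line holds the statement that follows it, and gold_snippet_index = 1 points at the Studio entry in context. A sketch of how the fields might be assembled into a completion prompt is given below; the prompt layout (context snippet first, then imports, then the cropped prefix) is an illustrative assumption, not something the dataset prescribes.

def build_prompt(row: dict) -> tuple[str, str]:
    """Return (prompt, target) for one dataset row.

    The prompt concatenates the gold context snippet, the import block,
    and the cropped code prefix; the target is the ground-truth next line.
    The ordering of the sections is an illustrative choice.
    """
    gold = row["context"][row["gold_snippet_index"]]
    context_block = "# {} ({})\n{}".format(gold["identifier"], gold["path"], gold["snippet"])
    prompt = "\n\n".join([context_block, row["import_statement"], row["cropped_code"]])
    return prompt, row["next_line"]

# For the SceneTex row above, the returned target would be
# "self.studio = Studio(self.config, self.device)".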
Example row 2:

repo_name: Vchitect/VBench
file_path: vbench/third_party/umt/datasets/build.py
context:
[ { "identifier": "TubeMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * self.width\n self.total_patches = self.frames * self.num_patches_per_frame \n self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)\n self.total_masks = self.frames * self.num_masks_per_frame\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.total_patches, self.total_masks\n )\n return repr_str\n\n def __call__(self):\n mask_per_frame = np.hstack([\n np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),\n np.ones(self.num_masks_per_frame),\n ])\n np.random.shuffle(mask_per_frame)\n mask = np.tile(mask_per_frame, (self.frames, 1)).flatten()\n return mask " }, { "identifier": "RandomMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class RandomMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n if not isinstance(input_size, tuple):\n input_size = (input_size, ) * 3\n\n self.frames, self.height, self.width = input_size\n\n self.num_patches = self.frames * self.height * self.width # 8x14x14\n self.num_mask = int(mask_ratio * self.num_patches)\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.num_patches, self.num_mask)\n return repr_str\n\n def __call__(self):\n mask = np.hstack([\n np.zeros(self.num_patches - self.num_mask),\n np.ones(self.num_mask),\n ])\n np.random.shuffle(mask)\n return mask # [196*8]" }, { "identifier": "VideoMAE", "path": "vbench/third_party/umt/datasets/mae.py", "snippet": "class VideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own video classification dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are three items in each line: (1) video path; (2) video length and (3) video label.\n prefix : str, required.\n The prefix for loading data.\n split : str, required.\n The split character for metadata.\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default None.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. 
Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n video_loader : bool, default False.\n Whether to use video loader to load data.\n use_decord : bool, default True.\n Whether to use Decord video loader to load data. Otherwise load image.\n transform : function, default None.\n A function that takes data and label and transforms them.\n data_aug : str, default 'v1'.\n Different types of data augmentation auto. Supports v1, v2, v3 and v4.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n \"\"\"\n def __init__(self,\n root,\n setting,\n prefix='',\n split=' ',\n train=True,\n test_mode=False,\n name_pattern='img_%05d.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n video_loader=False,\n use_decord=True,\n lazy_init=False,\n num_sample=1,\n ):\n\n super(VideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.prefix = prefix\n self.split = split\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_loader = video_loader\n self.video_ext = video_ext\n self.use_decord = use_decord\n self.transform = transform\n self.lazy_init = lazy_init\n self.num_sample = num_sample\n\n # sparse sampling, num_segments != 1\n if self.num_segments != 1:\n print('Use sparse sampling, change frame and stride')\n self.new_length = self.num_segments\n self.skip_length = 1\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise(RuntimeError(\"Found 0 video clips in subfolders of: \" + root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n while True:\n try:\n images = None\n if self.use_decord:\n directory, target = self.clips[index]\n if self.video_loader:\n if '.' 
in directory.split('/')[-1]:\n # data in the \"setting\" file already have extension, e.g., demo.mp4\n video_name = directory\n else:\n # data in the \"setting\" file do not have extension, e.g., demo\n # So we need to provide extension (i.e., .mp4) to complete the file name.\n video_name = '{}.{}'.format(directory, self.video_ext)\n\n video_name = os.path.join(self.prefix, video_name)\n if video_name.startswith('s3'):\n video_bytes = self.client.get(video_name)\n decord_vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n decord_vr = decord.VideoReader(video_name, num_threads=1, ctx=cpu(0))\n duration = len(decord_vr)\n \n segment_indices, skip_offsets = self._sample_train_indices(duration)\n images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets)\n \n else:\n video_name, total_frame, target = self.clips[index]\n video_name = os.path.join(self.prefix, video_name)\n\n segment_indices, skip_offsets = self._sample_train_indices(total_frame)\n frame_id_list = self._get_frame_id_list(total_frame, segment_indices, skip_offsets)\n images = []\n for idx in frame_id_list:\n frame_fname = os.path.join(video_name, self.name_pattern.format(idx))\n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n images.append(Image.fromarray(img)) \n if images is not None:\n break\n except Exception as e:\n print(\"Failed to load video from {} with error {}\".format(\n video_name, e))\n index = random.randint(0, len(self.clips) - 1)\n \n if self.num_sample > 1:\n process_data_list = []\n mask_list = []\n for _ in range(self.num_sample):\n process_data, mask = self.transform((images, None))\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0, 1)\n process_data_list.append(process_data)\n mask_list.append(mask)\n return process_data_list, mask_list\n else:\n process_data, mask = self.transform((images, None)) # T*C,H,W\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0, 1) # T*C,H,W -> T,C,H,W -> C,T,H,W\n return (process_data, mask)\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, directory, setting):\n if not os.path.exists(setting):\n raise(RuntimeError(\"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \" % (setting)))\n clips = []\n\n print(f'Load dataset using decord: {self.use_decord}')\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(self.split)\n if len(line_info) < 2:\n raise(RuntimeError('Video input format is not correct, missing one or more element. 
%s' % line))\n if self.use_decord:\n # line format: video_path, video_label\n clip_path = os.path.join(line_info[0])\n target = int(line_info[1])\n item = (clip_path, target)\n else:\n # line format: video_path, video_duration, video_label\n clip_path = os.path.join(line_info[0])\n total_frame = int(line_info[1])\n target = int(line_info[2])\n item = (clip_path, total_frame, target)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length + 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)),\n average_duration)\n offsets = offsets + np.random.randint(average_duration,\n size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(np.random.randint(\n num_frames - self.skip_length + 1,\n size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n def _get_frame_id_list(self, duration, indices, skip_offsets):\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n return frame_id_list\n\n def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets):\n sampled_list = []\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n try:\n video_data = video_reader.get_batch(frame_id_list).asnumpy()\n sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)]\n except:\n raise RuntimeError('Error occured in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration))\n return sampled_list" }, { "identifier": "VideoClsDataset", "path": "vbench/third_party/umt/datasets/kinetics.py", "snippet": "class VideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,\n args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n assert num_segment == 1\n if self.mode in ['train']:\n self.aug = 
True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) / 2\n spatial_start 
= int(spatial_step)\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[:, spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[:, :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1, chunk_nb=0):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n\n # handle temporal segments\n converted_len = int(self.clip_len * self.frame_sample_rate)\n seg_len = len(vr) // self.num_segment\n\n if self.mode == 'test':\n temporal_step = max(1.0 * (len(vr) - converted_len) / (self.test_num_segment - 1), 0)\n temporal_start = int(chunk_nb * temporal_step)\n\n bound = min(temporal_start + converted_len, len(vr))\n all_index = [x for x in range(temporal_start, bound, self.frame_sample_rate)]\n while len(all_index) < self.clip_len:\n all_index.append(all_index[-1])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n 
else:\n if self.mode == 'validation':\n end_idx = (seg_len - converted_len) // 2\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.clip_len)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i*seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "VideoClsDataset_sparse", "path": "vbench/third_party/umt/datasets/kinetics_sparse.py", "snippet": "class VideoClsDataset_sparse(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,\n args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n assert num_segment == 1\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=-1) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = 
self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=-1)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=0)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=0)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) / 2\n spatial_start = int(spatial_step)\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[:, spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[:, :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n 
num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def _get_seq_frames(self, video_size, num_frames, clip_idx=-1):\n seg_size = max(0., float(video_size - 1) / num_frames)\n max_frame = int(video_size) - 1\n seq = []\n # index from 1, must add 1\n if clip_idx == -1:\n for i in range(num_frames):\n start = int(np.round(seg_size * i))\n end = int(np.round(seg_size * (i + 1)))\n idx = min(random.randint(start, end), max_frame)\n seq.append(idx)\n else:\n num_segment = 1\n if self.mode == 'test':\n num_segment = self.test_num_segment\n duration = seg_size / (num_segment + 1)\n for i in range(num_frames):\n start = int(np.round(seg_size * i))\n frame_index = start + int(duration * (clip_idx + 1))\n idx = min(frame_index, max_frame)\n seq.append(idx)\n return seq\n\n def loadvideo_decord(self, sample, chunk_nb=0):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n\n all_index = self._get_seq_frames(len(vr), self.clip_len, clip_idx=chunk_nb)\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "SSVideoClsDataset", "path": "vbench/third_party/umt/datasets/ssv2.py", "snippet": "class SSVideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256,\n new_width=340, keep_aspect_ratio=True, num_segment=1,\n num_crop=1, test_num_segment=10, test_num_crop=3, args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n \n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, 
interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb # 0/1\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::2, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::2, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = 
create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n if self.mode == 'test':\n tick = len(vr) / float(self.num_segment)\n all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +\n [int(tick * x) for x in range(self.num_segment)]))\n while len(all_index) < (self.num_segment * self.test_num_segment):\n all_index.append(all_index[-1])\n all_index = np.sort(np.array(all_index))\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n elif self.mode == 'validation':\n tick = len(vr) / float(self.num_segment)\n all_index = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n average_duration = len(vr) // self.num_segment\n if average_duration > 0:\n all_index = list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,\n size=self.num_segment))\n elif len(vr) > self.num_segment:\n all_index = list(np.sort(np.random.randint(len(vr), size=self.num_segment)))\n else:\n all_index = list(np.zeros((self.num_segment,)))\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "SSRawFrameClsDataset", "path": "vbench/third_party/umt/datasets/ssv2.py", "snippet": "class SSRawFrameClsDataset(Dataset):\n \"\"\"Load your own raw frame classification dataset.\"\"\"\n\n def __init__(self, 
anno_path, prefix='', split=' ', mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256, new_width=340,\n keep_aspect_ratio=True, num_segment=1, num_crop=1, test_num_segment=10,\n test_num_crop=3, filename_tmpl='img_{:05}.jpg', args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.filename_tmpl = filename_tmpl\n self.args = args\n self.aug = False\n self.rand_erase = False\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\n \"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.total_frames = list(cleaned.values[:, 1])\n self.label_array = list(cleaned.values[:, -1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size,\n interpolation='bilinear'),\n CenterCrop(size=(self.crop_size,\n self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size),\n interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_total_frames = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n self.test_seg.append((ck, cp))\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_total_frames.append(self.total_frames[idx])\n self.test_label_array.append(self.label_array[idx])\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample,\n total_frame,\n sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during training\".format(\n sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample,\n total_frame,\n sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n\n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample, total_frame)\n if len(buffer) == 0:\n 
while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during validation\".\n format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_frame(sample, total_frame)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::self.test_num_segment, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::self.test_num_segment, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [transforms.ToPILImage()(frame) for frame in buffer]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C\n\n # T H W C\n buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False)\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def load_frame(self, sample, num_frames, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n if self.mode == 'test':\n tick = num_frames / float(self.num_segment)\n all_index = []\n for t_seg in range(self.test_num_segment):\n tmp_index = [\n int(t_seg * tick / self.test_num_segment + tick * x)\n for x in range(self.num_segment)\n ]\n all_index.extend(tmp_index)\n all_index = list(np.sort(np.array(all_index)))\n imgs = []\n 
for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1)) \n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n # handle temporal segments\n average_duration = num_frames // self.num_segment\n all_index = []\n if average_duration > 0:\n if self.mode == 'validation':\n all_index = list(\n np.multiply(list(range(self.num_segment)),\n average_duration) +\n np.ones(self.num_segment, dtype=int) *\n (average_duration // 2))\n else:\n all_index = list(\n np.multiply(list(range(self.num_segment)),\n average_duration) +\n np.random.randint(average_duration, size=self.num_segment))\n elif num_frames > self.num_segment:\n if self.mode == 'validation':\n all_index = list(range(self.num_segment))\n else:\n all_index = list(\n np.sort(\n np.random.randint(num_frames, size=self.num_segment)))\n else:\n all_index = [0] * (self.num_segment - num_frames) + list(\n range(num_frames))\n all_index = list(np.array(all_index))\n imgs = []\n for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1))\n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" } ]
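The context snippets above share the same Decord-backed loading pattern: an annotation CSV parsed with pandas, per-mode transform pipelines, and segment-based frame sampling. As a quick orientation, here is a minimal, hedged sketch of how VideoClsDataset_sparse could be instantiated in validation mode, based only on the __init__ signature and __getitem__ logic shown in its snippet. The annotation path and the args stub are placeholders rather than values from this record, and decord plus pandas are assumed to be installed.

from types import SimpleNamespace

from vbench.third_party.umt.datasets.kinetics_sparse import VideoClsDataset_sparse

# args is only consulted on the training path of the snippet above
# (reprob, num_sample, aa, train_interpolation, data_set), so a stub suffices here.
args = SimpleNamespace(reprob=0.0)

dataset = VideoClsDataset_sparse(
    anno_path="annotations/val.csv",  # placeholder; rows of "<video path><split><label>"
    prefix="",                        # optional root prepended to every video path
    split=" ",                        # delimiter pandas uses to parse the annotation file
    mode="validation",
    clip_len=8,
    crop_size=224,
    short_side_size=256,
    args=args,
)

clip, label, video_name = dataset[0]  # validation mode returns (normalized clip, label, file basename)

In test mode the same class instead enumerates test_num_segment x test_num_crop temporal and spatial views per video, which is why __len__ switches to the expanded test_dataset list.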
import os
from torchvision import transforms
from .transforms import *
from .masking_generator import TubeMaskingGenerator, RandomMaskingGenerator
from .mae import VideoMAE
from .kinetics import VideoClsDataset
from .kinetics_sparse import VideoClsDataset_sparse
from .ssv2 import SSVideoClsDataset, SSRawFrameClsDataset
14,352
class DataAugmentationForVideoMAE(object):
    def __init__(self, args):
        self.input_mean = [0.485, 0.456, 0.406]  # IMAGENET_DEFAULT_MEAN
        self.input_std = [0.229, 0.224, 0.225]  # IMAGENET_DEFAULT_STD
        normalize = GroupNormalize(self.input_mean, self.input_std)
        self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
        if args.color_jitter > 0:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupColorJitter(args.color_jitter),
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        else:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        if args.mask_type == 'tube':
class DataAugmentationForVideoMAE(object):
    def __init__(self, args):
        self.input_mean = [0.485, 0.456, 0.406]  # IMAGENET_DEFAULT_MEAN
        self.input_std = [0.229, 0.224, 0.225]  # IMAGENET_DEFAULT_STD
        normalize = GroupNormalize(self.input_mean, self.input_std)
        self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
        if args.color_jitter > 0:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupColorJitter(args.color_jitter),
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        else:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        if args.mask_type == 'tube':
self.masked_position_generator = TubeMaskingGenerator(
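For orientation, the truncated cropped_code and all_code previews above read only four attributes off args before the snippet is cut off at the branch that next_line continues. The sketch below lists those attributes with illustrative placeholder values (the real defaults live in the training script, which is not part of this record) and deliberately does not instantiate the class, since the masking-generator branch is incomplete here.

from types import SimpleNamespace

# Attributes consumed by the visible part of DataAugmentationForVideoMAE
# (placeholder values, not taken from this record):
args = SimpleNamespace(
    input_size=224,    # crop size handed to GroupMultiScaleCrop
    color_jitter=0.0,  # > 0 switches in the GroupColorJitter pipeline
    flip=True,         # forwarded to GroupRandomHorizontalFlip
    mask_type='tube',  # selects the branch that next_line continues
)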
0
2023-11-27 12:41:46+00:00
16k
HyeonHo99/Video-Motion-Customization
showone/models/unet_3d_condition.py
[ { "identifier": "TransformerTemporalModel", "path": "showone/models/transformer_temporal.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This is fixed during training since it is used to learn a number of position embeddings.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to use in feed-forward.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlock` attention should contain a bias parameter.\n double_self_attention (`bool`, *optional*):\n Configure if each `TransformerBlock` should contain two self-attention layers.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states,\n encoder_hidden_states=None,\n timestep=None,\n class_labels=None,\n num_frames=1,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n \"\"\"\n The [`TransformerTemporal`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input hidden_states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.long`, *optional*):\n Used to indicate denoising step. 
Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is\n returned, otherwise a `tuple` where the first element is the sample tensor.\n \"\"\"\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, channel, num_frames)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)" }, { "identifier": "CrossAttnDownBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n attentions = []\n temp_attentions = []\n temp_convs = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n 
cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n output_states = ()\n\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, **ckpt_kwargs,\n ).sample\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n 
transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n num_frames: int = 1,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), 
hidden_states, temb, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "DownBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, num_frames=1):\n output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = 
output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "UNetMidBlock3DCrossAttn", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=1, #todo: transformer_layers_per_block?\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n 
encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states" }, { "identifier": "UNetMidBlock3DSimpleCrossAttn", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DSimpleCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attention_head_dim=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n skip_time_act=False,\n only_cross_attention=False,\n cross_attention_norm=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n\n self.attention_head_dim = attention_head_dim\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n self.num_heads = in_channels // self.attention_head_dim\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n processor = (\n AttnAddedKVProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") else AttnAddedKVProcessor()\n )\n\n attentions.append(\n Attention(\n query_dim=in_channels,\n cross_attention_dim=in_channels,\n heads=self.num_heads,\n dim_head=self.attention_head_dim,\n added_kv_proj_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n bias=True,\n upcast_softmax=True,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n processor=processor,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n self.attention_head_dim,\n in_channels // self.attention_head_dim,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n 
):\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n\n if attention_mask is None:\n # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.\n mask = None if encoder_hidden_states is None else encoder_attention_mask\n else:\n # when attention_mask is defined: we don't even check for encoder_attention_mask.\n # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.\n # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.\n # then we can simplify this whole if/else block to:\n # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask\n mask = attention_mask\n\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=mask,\n **cross_attention_kwargs,\n )\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states" }, { "identifier": "UpBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = 
torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "showone/models/unet_3d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock3D\")\n return SimpleCrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == 
\"ResnetDownsampleBlock3D\":\n return ResnetDownsampleBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "showone/models/unet_3d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"SimpleCrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock3D\")\n return SimpleCrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == 
\"ResnetUpsampleBlock3D\":\n return ResnetUpsampleBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")" } ]
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging from diffusers.models.activations import get_activation from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.modeling_utils import ModelMixin from .transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import ( CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UNetMidBlock3DSimpleCrossAttn, UpBlock3D, get_down_block, get_up_block, ) from diffusers.utils import WEIGHTS_NAME import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import os, json
13,182
for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ # count = len(self.attn_processors.keys()) # ignore temporal attention count = len({k: v for k, v in self.attn_processors.items() if "temp_" not in k}.keys()) # Show-1 original line #count = len(self.attn_processors.keys()) # --> If BoxDiff: use this line if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor") and "temp_" not in name: if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # Copyright 2023 The ModelScope Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from diffusers.models.transformer_temporal import TransformerTemporalModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, it will skip the normalization and activation layers in post-processing norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), mid_block_type: Optional[str] = "UNetMidBlock3DCrossAttn", up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, transfromer_in_opt: bool =False, ): super().__init__() self.sample_size = sample_size self.transformer_in_opt = transfromer_in_opt if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. 
`block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) if self.transformer_in_opt: self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=64, in_channels=block_out_channels[0], num_layers=1, ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock3DCrossAttn": self.mid_block = UNetMidBlock3DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) elif mid_block_type == "UNetMidBlock3DSimpleCrossAttn": self.mid_block = UNetMidBlock3DSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not 
is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ # count = len(self.attn_processors.keys()) # ignore temporal attention count = len({k: v for k, v in self.attn_processors.items() if "temp_" not in k}.keys()) # Show-1 original line #count = len(self.attn_processors.keys()) # --> If BoxDiff: use this line if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor") and "temp_" not in name: if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
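This target line begins the hook that flips the gradient_checkpointing flag consumed by the retrieved block snippet at the top of this record, where each resnet/temporal-conv pair is wrapped in torch.utils.checkpoint.checkpoint through a small closure whenever self.gradient_checkpointing and self.training are set. A minimal standalone illustration of that wrapper pattern follows; the linear layer is a stand-in for a block, not the record's code:

import torch
import torch.utils.checkpoint
import torch.nn as nn

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

layer = nn.Linear(16, 16)
x = torch.randn(4, 16, requires_grad=True)

# same result as layer(x), but activations are recomputed during the backward pass
y = torch.utils.checkpoint.checkpoint(create_custom_forward(layer), x, use_reentrant=False)
y.sum().backward()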
2
2023-11-29 17:23:45+00:00
16k
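Since the UNet3DConditionModel in this record inherits diffusers' ModelMixin and declares _supports_gradient_checkpointing = True, checkpointing is normally switched on through the mixin's enable_gradient_checkpointing() helper, which applies _set_gradient_checkpointing(value=True) over the submodules. A hedged usage sketch; the import path is hypothetical and a default-sized model is assumed to fit in memory:

from showone.models.unet_3d_condition import UNet3DConditionModel  # hypothetical module path

unet = UNet3DConditionModel()          # defaults from the signature above
unet.enable_gradient_checkpointing()   # walks submodules via _set_gradient_checkpointing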
xmu-xiaoma666/X-Dreamer
unet_2d_blocks.py
[ { "identifier": "AdaGroupNorm", "path": "attention.py", "snippet": "class AdaGroupNorm(nn.Module):\n \"\"\"\n GroupNorm layer modified to incorporate timestep embeddings.\n \"\"\"\n\n def __init__(\n self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5\n ):\n super().__init__()\n self.num_groups = num_groups\n self.eps = eps\n\n if act_fn is None:\n self.act = None\n else:\n self.act = get_activation(act_fn)\n\n self.linear = nn.Linear(embedding_dim, out_dim * 2)\n\n def forward(self, x, emb):\n if self.act:\n emb = self.act(emb)\n emb = self.linear(emb)\n emb = emb[:, :, None, None]\n scale, shift = emb.chunk(2, dim=1)\n\n x = F.group_norm(x, self.num_groups, eps=self.eps)\n x = x * (1 + scale) + shift\n return x" }, { "identifier": "Attention", "path": "attention_processor.py", "snippet": "class Attention(nn.Module):\n r\"\"\"\n A cross attention layer.\n\n Parameters:\n query_dim (`int`): The number of channels in the query.\n cross_attention_dim (`int`, *optional*):\n The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.\n heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.\n dim_head (`int`, *optional*, defaults to 64): The number of channels in each head.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n bias (`bool`, *optional*, defaults to False):\n Set to `True` for the query, key, and value linear layers to contain a bias parameter.\n \"\"\"\n\n def __init__(\n self,\n query_dim: int,\n cross_attention_dim: Optional[int] = None,\n heads: int = 8,\n dim_head: int = 64,\n dropout: float = 0.0,\n bias=False,\n upcast_attention: bool = False,\n upcast_softmax: bool = False,\n cross_attention_norm: Optional[str] = None,\n cross_attention_norm_num_groups: int = 32,\n added_kv_proj_dim: Optional[int] = None,\n norm_num_groups: Optional[int] = None,\n spatial_norm_dim: Optional[int] = None,\n out_bias: bool = True,\n scale_qk: bool = True,\n only_cross_attention: bool = False,\n eps: float = 1e-5,\n rescale_output_factor: float = 1.0,\n residual_connection: bool = False,\n _from_deprecated_attn_block=False,\n processor: Optional[\"AttnProcessor\"] = None,\n ):\n super().__init__()\n inner_dim = dim_head * heads\n cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim\n self.upcast_attention = upcast_attention\n self.upcast_softmax = upcast_softmax\n self.rescale_output_factor = rescale_output_factor\n self.residual_connection = residual_connection\n self.dropout = dropout\n\n # we make use of this private variable to know whether this class is loaded\n # with an deprecated state dict so that we can convert it on the fly\n self._from_deprecated_attn_block = _from_deprecated_attn_block\n\n self.scale_qk = scale_qk\n self.scale = dim_head**-0.5 if self.scale_qk else 1.0\n\n self.heads = heads\n # for slice_size > 0 the attention score computation\n # is split across the batch axis to save memory\n # You can set slice_size with `set_attention_slice`\n self.sliceable_head_dim = heads\n\n self.added_kv_proj_dim = added_kv_proj_dim\n self.only_cross_attention = only_cross_attention\n\n if self.added_kv_proj_dim is None and self.only_cross_attention:\n raise ValueError(\n \"`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. 
Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`.\"\n )\n\n if norm_num_groups is not None:\n self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True)\n else:\n self.group_norm = None\n\n if spatial_norm_dim is not None:\n self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim)\n else:\n self.spatial_norm = None\n\n if cross_attention_norm is None:\n self.norm_cross = None\n elif cross_attention_norm == \"layer_norm\":\n self.norm_cross = nn.LayerNorm(cross_attention_dim)\n elif cross_attention_norm == \"group_norm\":\n if self.added_kv_proj_dim is not None:\n # The given `encoder_hidden_states` are initially of shape\n # (batch_size, seq_len, added_kv_proj_dim) before being projected\n # to (batch_size, seq_len, cross_attention_dim). The norm is applied\n # before the projection, so we need to use `added_kv_proj_dim` as\n # the number of channels for the group norm.\n norm_cross_num_channels = added_kv_proj_dim\n else:\n norm_cross_num_channels = cross_attention_dim\n\n self.norm_cross = nn.GroupNorm(\n num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True\n )\n else:\n raise ValueError(\n f\"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'\"\n )\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=bias)\n\n if not self.only_cross_attention:\n # only relevant for the `AddedKVProcessor` classes\n self.to_k = nn.Linear(cross_attention_dim, inner_dim, bias=bias)\n self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias)\n else:\n self.to_k = None\n self.to_v = None\n\n if self.added_kv_proj_dim is not None:\n self.add_k_proj = nn.Linear(added_kv_proj_dim, inner_dim)\n self.add_v_proj = nn.Linear(added_kv_proj_dim, inner_dim)\n\n self.to_out = nn.ModuleList([])\n self.to_out.append(nn.Linear(inner_dim, query_dim, bias=out_bias))\n self.to_out.append(nn.Dropout(dropout))\n\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. 
TODO remove scale_qk check when we move to torch 2.1\n if processor is None:\n processor = (\n AttnProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk else AttnProcessor()\n )\n self.set_processor(processor)\n\n def set_use_memory_efficient_attention_xformers(\n self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None\n ):\n is_lora = hasattr(self, \"processor\") and isinstance(\n self.processor,\n LORA_ATTENTION_PROCESSORS,\n )\n is_custom_diffusion = hasattr(self, \"processor\") and isinstance(\n self.processor, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor)\n )\n is_added_kv_processor = hasattr(self, \"processor\") and isinstance(\n self.processor,\n (\n AttnAddedKVProcessor,\n AttnAddedKVProcessor2_0,\n SlicedAttnAddedKVProcessor,\n XFormersAttnAddedKVProcessor,\n LoRAAttnAddedKVProcessor,\n ),\n )\n\n if use_memory_efficient_attention_xformers:\n if is_added_kv_processor and (is_lora or is_custom_diffusion):\n raise NotImplementedError(\n f\"Memory efficient attention is currently not supported for LoRA or custom diffuson for attention processor type {self.processor}\"\n )\n if not is_xformers_available():\n raise ModuleNotFoundError(\n (\n \"Refer to https://github.com/facebookresearch/xformers for more information on how to install\"\n \" xformers\"\n ),\n name=\"xformers\",\n )\n elif not torch.cuda.is_available():\n raise ValueError(\n \"torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is\"\n \" only available for GPU \"\n )\n else:\n try:\n # Make sure we can run the memory efficient attention\n _ = xformers.ops.memory_efficient_attention(\n torch.randn((1, 2, 40), device=\"cuda\"),\n torch.randn((1, 2, 40), device=\"cuda\"),\n torch.randn((1, 2, 40), device=\"cuda\"),\n )\n except Exception as e:\n raise e\n\n if is_lora:\n # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers\n # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0?\n processor = LoRAXFormersAttnProcessor(\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n rank=self.processor.rank,\n attention_op=attention_op,\n )\n processor.load_state_dict(self.processor.state_dict())\n processor.to(self.processor.to_q_lora.up.weight.device)\n elif is_custom_diffusion:\n processor = CustomDiffusionXFormersAttnProcessor(\n train_kv=self.processor.train_kv,\n train_q_out=self.processor.train_q_out,\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n attention_op=attention_op,\n )\n processor.load_state_dict(self.processor.state_dict())\n if hasattr(self.processor, \"to_k_custom_diffusion\"):\n processor.to(self.processor.to_k_custom_diffusion.weight.device)\n elif is_added_kv_processor:\n # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP\n # which uses this type of cross attention ONLY because the attention mask of format\n # [0, ..., -10.000, ..., 0, ...,] is not supported\n # throw warning\n logger.info(\n \"Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation.\"\n )\n processor = XFormersAttnAddedKVProcessor(attention_op=attention_op)\n else:\n processor = XFormersAttnProcessor(attention_op=attention_op)\n else:\n if is_lora:\n attn_processor_class = (\n LoRAAttnProcessor2_0 if hasattr(F, \"scaled_dot_product_attention\") else LoRAAttnProcessor\n 
)\n processor = attn_processor_class(\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n rank=self.processor.rank,\n )\n processor.load_state_dict(self.processor.state_dict())\n processor.to(self.processor.to_q_lora.up.weight.device)\n elif is_custom_diffusion:\n processor = CustomDiffusionAttnProcessor(\n train_kv=self.processor.train_kv,\n train_q_out=self.processor.train_q_out,\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n )\n processor.load_state_dict(self.processor.state_dict())\n if hasattr(self.processor, \"to_k_custom_diffusion\"):\n processor.to(self.processor.to_k_custom_diffusion.weight.device)\n else:\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1\n processor = (\n AttnProcessor2_0()\n if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk\n else AttnProcessor()\n )\n\n self.set_processor(processor)\n\n def set_attention_slice(self, slice_size):\n if slice_size is not None and slice_size > self.sliceable_head_dim:\n raise ValueError(f\"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.\")\n\n if slice_size is not None and self.added_kv_proj_dim is not None:\n processor = SlicedAttnAddedKVProcessor(slice_size)\n elif slice_size is not None:\n processor = SlicedAttnProcessor(slice_size)\n elif self.added_kv_proj_dim is not None:\n processor = AttnAddedKVProcessor()\n else:\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. 
TODO remove scale_qk check when we move to torch 2.1\n processor = (\n AttnProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk else AttnProcessor()\n )\n\n self.set_processor(processor)\n\n def set_processor(self, processor: \"AttnProcessor\"):\n # if current processor is in `self._modules` and if passed `processor` is not, we need to\n # pop `processor` from `self._modules`\n if (\n hasattr(self, \"processor\")\n and isinstance(self.processor, torch.nn.Module)\n and not isinstance(processor, torch.nn.Module)\n ):\n logger.info(f\"You are removing possibly trained weights of {self.processor} with {processor}\")\n self._modules.pop(\"processor\")\n\n self.processor = processor\n\n def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, index=None, came_posfeat=None, **cross_attention_kwargs):\n # The `Attention` class can call different attention processors / attention functions\n # here we simply pass along all tensors to the selected processor class\n # For standard processors that are defined here, `**cross_attention_kwargs` is empty\n return self.processor(\n self,\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n ####################################\n index=index,\n came_posfeat = came_posfeat,\n ###########################################\n )\n\n def batch_to_head_dim(self, tensor):\n head_size = self.heads\n batch_size, seq_len, dim = tensor.shape\n tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)\n tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)\n return tensor\n\n def head_to_batch_dim(self, tensor, out_dim=3):\n head_size = self.heads\n batch_size, seq_len, dim = tensor.shape\n tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)\n tensor = tensor.permute(0, 2, 1, 3)\n\n if out_dim == 3:\n tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)\n\n return tensor\n\n def get_attention_scores(self, query, key, attention_mask=None):\n dtype = query.dtype\n if self.upcast_attention:\n query = query.float()\n key = key.float()\n\n if attention_mask is None:\n baddbmm_input = torch.empty(\n query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device\n )\n beta = 0\n else:\n baddbmm_input = attention_mask\n beta = 1\n\n attention_scores = torch.baddbmm(\n baddbmm_input,\n query,\n key.transpose(-1, -2),\n beta=beta,\n alpha=self.scale,\n )\n del baddbmm_input\n\n if self.upcast_softmax:\n attention_scores = attention_scores.float()\n\n attention_probs = attention_scores.softmax(dim=-1)\n del attention_scores\n\n attention_probs = attention_probs.to(dtype)\n\n return attention_probs\n\n\n def get_attention_scores_for_query(self, query, key, attention_mask=None):\n dtype = query.dtype\n if self.upcast_attention:\n query = query.float()\n key = key.float()\n\n if attention_mask is None:\n baddbmm_input = torch.empty(\n query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device\n )\n beta = 0\n else:\n baddbmm_input = attention_mask\n beta = 1\n\n attention_scores = torch.baddbmm(\n baddbmm_input,\n query,\n key.transpose(-1, -2),\n beta=beta,\n alpha=self.scale,\n )\n del baddbmm_input\n\n if self.upcast_softmax:\n attention_scores = attention_scores.float()\n\n # attention_probs = attention_scores.softmax(dim=-2)\n attention_probs = attention_scores\n # del attention_scores\n\n attention_probs = 
attention_probs.to(dtype)\n\n return attention_probs\n\n def prepare_attention_mask(self, attention_mask, target_length, batch_size=None, out_dim=3):\n if batch_size is None:\n deprecate(\n \"batch_size=None\",\n \"0.0.15\",\n (\n \"Not passing the `batch_size` parameter to `prepare_attention_mask` can lead to incorrect\"\n \" attention mask preparation and is deprecated behavior. Please make sure to pass `batch_size` to\"\n \" `prepare_attention_mask` when preparing the attention_mask.\"\n ),\n )\n batch_size = 1\n\n head_size = self.heads\n if attention_mask is None:\n return attention_mask\n\n current_length: int = attention_mask.shape[-1]\n if current_length != target_length:\n if attention_mask.device.type == \"mps\":\n # HACK: MPS: Does not support padding by greater than dimension of input tensor.\n # Instead, we can manually construct the padding tensor.\n padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length)\n padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat([attention_mask, padding], dim=2)\n else:\n # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:\n # we want to instead pad by (0, remaining_length), where remaining_length is:\n # remaining_length: int = target_length - current_length\n # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding\n attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)\n\n if out_dim == 3:\n if attention_mask.shape[0] < batch_size * head_size:\n attention_mask = attention_mask.repeat_interleave(head_size, dim=0)\n elif out_dim == 4:\n attention_mask = attention_mask.unsqueeze(1)\n attention_mask = attention_mask.repeat_interleave(head_size, dim=1)\n\n return attention_mask\n\n def norm_encoder_hidden_states(self, encoder_hidden_states):\n assert self.norm_cross is not None, \"self.norm_cross must be defined to call self.norm_encoder_hidden_states\"\n\n if isinstance(self.norm_cross, nn.LayerNorm):\n encoder_hidden_states = self.norm_cross(encoder_hidden_states)\n elif isinstance(self.norm_cross, nn.GroupNorm):\n # Group norm norms along the channels dimension and expects\n # input to be in the shape of (N, C, *). 
In this case, we want\n # to norm along the hidden dimension, so we need to move\n # (batch_size, sequence_length, hidden_size) ->\n # (batch_size, hidden_size, sequence_length)\n encoder_hidden_states = encoder_hidden_states.transpose(1, 2)\n encoder_hidden_states = self.norm_cross(encoder_hidden_states)\n encoder_hidden_states = encoder_hidden_states.transpose(1, 2)\n else:\n assert False\n\n return encoder_hidden_states" }, { "identifier": "AttnAddedKVProcessor", "path": "attention_processor.py", "snippet": "class AttnAddedKVProcessor:\n r\"\"\"\n Processor for performing attention-related computations with extra learnable key and value matrices for the text\n encoder.\n \"\"\"\n\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n residual = hidden_states\n hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2)\n batch_size, sequence_length, _ = hidden_states.shape\n\n attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n\n if encoder_hidden_states is None:\n encoder_hidden_states = hidden_states\n elif attn.norm_cross:\n encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n\n query = attn.to_q(hidden_states)\n query = attn.head_to_batch_dim(query)\n\n encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)\n encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)\n encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj)\n encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj)\n\n if not attn.only_cross_attention:\n key = attn.to_k(hidden_states)\n value = attn.to_v(hidden_states)\n key = attn.head_to_batch_dim(key)\n value = attn.head_to_batch_dim(value)\n key = torch.cat([encoder_hidden_states_key_proj, key], dim=1)\n value = torch.cat([encoder_hidden_states_value_proj, value], dim=1)\n else:\n key = encoder_hidden_states_key_proj\n value = encoder_hidden_states_value_proj\n\n attention_probs = attn.get_attention_scores(query, key, attention_mask)\n hidden_states = torch.bmm(attention_probs, value)\n hidden_states = attn.batch_to_head_dim(hidden_states)\n\n # linear proj\n hidden_states = attn.to_out[0](hidden_states)\n # dropout\n hidden_states = attn.to_out[1](hidden_states)\n\n hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape)\n hidden_states = hidden_states + residual\n\n return hidden_states" }, { "identifier": "AttnAddedKVProcessor2_0", "path": "attention_processor.py", "snippet": "class AttnAddedKVProcessor2_0:\n r\"\"\"\n Processor for performing scaled dot-product attention (enabled by default if you're using PyTorch 2.0), with extra\n learnable key and value matrices for the text encoder.\n \"\"\"\n\n def __init__(self):\n if not hasattr(F, \"scaled_dot_product_attention\"):\n raise ImportError(\n \"AttnAddedKVProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.\"\n )\n\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n residual = hidden_states\n hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2)\n batch_size, sequence_length, _ = hidden_states.shape\n\n attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size, out_dim=4)\n\n if 
encoder_hidden_states is None:\n encoder_hidden_states = hidden_states\n elif attn.norm_cross:\n encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n\n query = attn.to_q(hidden_states)\n query = attn.head_to_batch_dim(query, out_dim=4)\n\n encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)\n encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)\n encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj, out_dim=4)\n encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4)\n\n if not attn.only_cross_attention:\n key = attn.to_k(hidden_states)\n value = attn.to_v(hidden_states)\n key = attn.head_to_batch_dim(key, out_dim=4)\n value = attn.head_to_batch_dim(value, out_dim=4)\n key = torch.cat([encoder_hidden_states_key_proj, key], dim=2)\n value = torch.cat([encoder_hidden_states_value_proj, value], dim=2)\n else:\n key = encoder_hidden_states_key_proj\n value = encoder_hidden_states_value_proj\n\n # the output of sdp = (batch, num_heads, seq_len, head_dim)\n # TODO: add support for attn.scale when we move to Torch 2.1\n hidden_states = F.scaled_dot_product_attention(\n query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False\n )\n hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1])\n\n # linear proj\n hidden_states = attn.to_out[0](hidden_states)\n # dropout\n hidden_states = attn.to_out[1](hidden_states)\n\n hidden_states = hidden_states.transpose(-1, -2).reshape(residual.shape)\n hidden_states = hidden_states + residual\n\n return hidden_states" }, { "identifier": "Transformer2DModel", "path": "transformer_2d.py", "snippet": "class Transformer2DModel(ModelMixin, ConfigMixin):\n \"\"\"\n A 2D Transformer model for image-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This is fixed during training since it is used to learn a number of position embeddings.\n num_vector_embeds (`int`, *optional*):\n The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).\n Includes the class for the masked latent pixel.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to use in feed-forward.\n num_embeds_ada_norm ( `int`, *optional*):\n The number of diffusion steps used during training. Pass if at least one of the norm_layers is\n `AdaLayerNorm`. 
This is fixed during training since it is used to learn a number of embeddings that are\n added to the hidden states.\n\n During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlocks` attention should contain a bias parameter.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n num_vector_embeds: Optional[int] = None,\n patch_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n double_self_attention: bool = False,\n upcast_attention: bool = False,\n norm_type: str = \"layer_norm\",\n norm_elementwise_affine: bool = True,\n attention_type: str = \"default\",\n ):\n super().__init__()\n self.use_linear_projection = use_linear_projection\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`\n # Define whether input is continuous or discrete depending on configuration\n self.is_input_continuous = (in_channels is not None) and (patch_size is None)\n self.is_input_vectorized = num_vector_embeds is not None\n self.is_input_patches = in_channels is not None and patch_size is not None\n\n if norm_type == \"layer_norm\" and num_embeds_ada_norm is not None:\n deprecation_message = (\n f\"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or\"\n \" incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config.\"\n \" Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect\"\n \" results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it\"\n \" would be very nice if you could open a Pull request for the `transformer/config.json` file\"\n )\n deprecate(\"norm_type!=num_embeds_ada_norm\", \"1.0.0\", deprecation_message, standard_warn=False)\n norm_type = \"ada_norm\"\n\n if self.is_input_continuous and self.is_input_vectorized:\n raise ValueError(\n f\"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make\"\n \" sure that either `in_channels` or `num_vector_embeds` is None.\"\n )\n elif self.is_input_vectorized and self.is_input_patches:\n raise ValueError(\n f\"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make\"\n \" sure that either `num_vector_embeds` or `num_patches` is None.\"\n )\n elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:\n raise ValueError(\n f\"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:\"\n f\" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None.\"\n )\n\n # 2. 
Define input layers\n if self.is_input_continuous:\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n if use_linear_projection:\n self.proj_in = LoRACompatibleLinear(in_channels, inner_dim)\n else:\n self.proj_in = LoRACompatibleConv(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n elif self.is_input_vectorized:\n assert sample_size is not None, \"Transformer2DModel over discrete input must provide sample_size\"\n assert num_vector_embeds is not None, \"Transformer2DModel over discrete input must provide num_embed\"\n\n self.height = sample_size\n self.width = sample_size\n self.num_vector_embeds = num_vector_embeds\n self.num_latent_pixels = self.height * self.width\n\n self.latent_image_embedding = ImagePositionalEmbeddings(\n num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width\n )\n elif self.is_input_patches:\n assert sample_size is not None, \"Transformer2DModel over patched input must provide sample_size\"\n\n self.height = sample_size\n self.width = sample_size\n\n self.patch_size = patch_size\n self.pos_embed = PatchEmbed(\n height=sample_size,\n width=sample_size,\n patch_size=patch_size,\n in_channels=in_channels,\n embed_dim=inner_dim,\n )\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n double_self_attention=double_self_attention,\n upcast_attention=upcast_attention,\n norm_type=norm_type,\n norm_elementwise_affine=norm_elementwise_affine,\n attention_type=attention_type,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. 
Define output layers\n self.out_channels = in_channels if out_channels is None else out_channels\n if self.is_input_continuous:\n # TODO: should use out_channels for continuous projections\n if use_linear_projection:\n self.proj_out = LoRACompatibleLinear(inner_dim, in_channels)\n else:\n self.proj_out = LoRACompatibleConv(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)\n elif self.is_input_vectorized:\n self.norm_out = nn.LayerNorm(inner_dim)\n self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)\n elif self.is_input_patches:\n self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)\n self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)\n self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n class_labels: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ########################################\n index: Optional[torch.FloatTensor] = None,\n came_posfeat:Optional[torch.FloatTensor] = None,\n ###########################################\n ):\n \"\"\"\n The [`Transformer2DModel`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input `hidden_states`.\n encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.LongTensor`, *optional*):\n Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n encoder_attention_mask ( `torch.Tensor`, *optional*):\n Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:\n\n * Mask `(batch, sequence_length)` True = keep, False = discard.\n * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.\n\n If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format\n above. This bias will be added to the cross-attention scores.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a\n `tuple` where the first element is the sample tensor.\n \"\"\"\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.\n # we may have done this conversion already, e.g. 
if we came here via UNet2DConditionModel#forward.\n # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)\n if attention_mask is not None and attention_mask.ndim == 2:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:\n encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # 1. Input\n if self.is_input_continuous:\n batch, _, height, width = hidden_states.shape\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n if not self.use_linear_projection:\n hidden_states = self.proj_in(hidden_states)\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)\n else:\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)\n hidden_states = self.proj_in(hidden_states)\n elif self.is_input_vectorized:\n hidden_states = self.latent_image_embedding(hidden_states)\n elif self.is_input_patches:\n hidden_states = self.pos_embed(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n if self.training and self.gradient_checkpointing:\n hidden_states = torch.utils.checkpoint.checkpoint(\n block,\n hidden_states,\n attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n timestep,\n cross_attention_kwargs,\n class_labels,\n use_reentrant=False,\n )\n else:\n hidden_states, attention_map = block(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n ############################\n index=index,\n came_posfeat=came_posfeat\n ############################\n )\n\n # 3. 
Output\n if self.is_input_continuous:\n if not self.use_linear_projection:\n hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n hidden_states = self.proj_out(hidden_states)\n else:\n hidden_states = self.proj_out(hidden_states)\n hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n\n output = hidden_states + residual\n elif self.is_input_vectorized:\n hidden_states = self.norm_out(hidden_states)\n logits = self.out(hidden_states)\n # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)\n logits = logits.permute(0, 2, 1)\n\n # log(p(x_0))\n output = F.log_softmax(logits.double(), dim=1).float()\n elif self.is_input_patches:\n # TODO: cleanup!\n conditioning = self.transformer_blocks[0].norm1.emb(\n timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)\n hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]\n hidden_states = self.proj_out_2(hidden_states)\n\n # unpatchify\n height = width = int(hidden_states.shape[1] ** 0.5)\n hidden_states = hidden_states.reshape(\n shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)\n )\n hidden_states = torch.einsum(\"nhwpqc->nchpwq\", hidden_states)\n output = hidden_states.reshape(\n shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)\n )\n\n if not return_dict:\n return (output,), attention_map\n\n return Transformer2DModelOutput(sample=output), attention_map" } ]
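The Transformer2DModel snippet above documents how a 2-D keep/discard attention mask is converted into an additive bias before it reaches the attention scores. A minimal sketch of just that conversion, assuming a float dtype and a {0, 1} mask; the function name mask_to_bias is illustrative and not taken from the snippet:

import torch

def mask_to_bias(attention_mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    # (batch, key_tokens) with 1 = keep, 0 = discard
    # -> (batch, 1, key_tokens) with 0 = keep, -10000 = discard,
    # broadcastable over attention scores of shape (batch, heads, query_tokens, key_tokens).
    bias = (1 - attention_mask.to(dtype)) * -10000.0
    return bias.unsqueeze(1)

mask = torch.tensor([[1, 1, 0]])
print(mask_to_bias(mask, torch.float32))  # kept tokens map to 0 (printed as -0.), the discarded one to -10000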
from typing import Any, Dict, Optional, Tuple
from torch import nn
from diffusers.utils import is_torch_version, logging
from diffusers.models.activations import get_activation
from diffusers.models.dual_transformer_2d import DualTransformer2DModel
from attention import AdaGroupNorm
from attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0
from diffusers.models.resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D
from transformer_2d import Transformer2DModel
import numpy as np
import torch
import torch.nn.functional as F
12,400
output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, ################################################### index: Optional[torch.FloatTensor] = None, came_posfeat: Optional[torch.FloatTensor] = None, ################################################### ) -> torch.FloatTensor: hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs, ) else: hidden_states, attention_map = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, ######################### index=index, came_posfeat = came_posfeat, ############################# )#[0] ########################################## hidden_states = hidden_states[0] ############################################ hidden_states = resnet(hidden_states, temb) return hidden_states, attention_map class UNetMidBlock2DSimpleCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim=1, output_scale_factor=1.0, cross_attention_dim=1280, skip_time_act=False, only_cross_attention=False, cross_attention_norm=None, ): super().__init__() self.has_cross_attention = True self.attention_head_dim = attention_head_dim resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.num_heads = in_channels // self.attention_head_dim # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ] attentions = [] for _ in range(num_layers): processor = (
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from ..utils import is_torch_version, logging # from .activations import get_activation # from .dual_transformer_2d import DualTransformer2DModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name def get_down_block( down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, transformer_layers_per_block=1, num_attention_heads=None, resnet_groups=None, cross_attention_dim=None, downsample_padding=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", attention_type="default", resnet_skip_time_act=False, resnet_out_scale_factor=1.0, cross_attention_norm=None, attention_head_dim=None, downsample_type=None, ): # If attn head dim is not defined, we default it to the number of heads if attention_head_dim is None: logger.warn( f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}." ) attention_head_dim = num_attention_heads down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type if down_block_type == "DownBlock2D": return DownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "ResnetDownsampleBlock2D": return ResnetDownsampleBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, ) elif down_block_type == "AttnDownBlock2D": if add_downsample is False: downsample_type = None else: downsample_type = downsample_type or "conv" # default to 'conv' return AttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, downsample_type=downsample_type, ) elif down_block_type == "CrossAttnDownBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") return CrossAttnDownBlock2D( num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, 
resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, ) elif down_block_type == "SimpleCrossAttnDownBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D") return SimpleCrossAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif down_block_type == "SkipDownBlock2D": return SkipDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "AttnSkipDownBlock2D": return AttnSkipDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "DownEncoderBlock2D": return DownEncoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "AttnDownEncoderBlock2D": return AttnDownEncoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "KDownBlock2D": return KDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif down_block_type == "KCrossAttnDownBlock2D": return KCrossAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, add_self_attention=True if not add_downsample else False, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, transformer_layers_per_block=1, num_attention_heads=None, resnet_groups=None, cross_attention_dim=None, 
dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", attention_type="default", resnet_skip_time_act=False, resnet_out_scale_factor=1.0, cross_attention_norm=None, attention_head_dim=None, upsample_type=None, ): # If attn head dim is not defined, we default it to the number of heads if attention_head_dim is None: logger.warn( f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}." ) attention_head_dim = num_attention_heads up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type if up_block_type == "UpBlock2D": return UpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "ResnetUpsampleBlock2D": return ResnetUpsampleBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, ) elif up_block_type == "CrossAttnUpBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") return CrossAttnUpBlock2D( num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, ) elif up_block_type == "SimpleCrossAttnUpBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D") return SimpleCrossAttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif up_block_type == "AttnUpBlock2D": if add_upsample is False: upsample_type = None else: upsample_type = upsample_type or "conv" # default to 'conv' return AttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, 
resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type, ) elif up_block_type == "SkipUpBlock2D": return SkipUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "AttnSkipUpBlock2D": return AttnSkipUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "UpDecoderBlock2D": return UpDecoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels, ) elif up_block_type == "AttnUpDecoderBlock2D": return AttnUpDecoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels, ) elif up_block_type == "KUpBlock2D": return KUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif up_block_type == "KCrossAttnUpBlock2D": return KCrossAttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, ) raise ValueError(f"{up_block_type} does not exist.") class AutoencoderTinyBlock(nn.Module): def __init__(self, in_channels: int, out_channels: int, act_fn: str): super().__init__() act_fn = get_activation(act_fn) self.conv = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), act_fn, nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), act_fn, nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), ) self.skip = ( nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) if in_channels != out_channels else nn.Identity() ) self.fuse = nn.ReLU() def forward(self, x): return self.fuse(self.conv(x) + self.skip(x)) class UNetMidBlock2D(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", # default, spatial resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, add_attention: bool = True, attention_head_dim=1, output_scale_factor=1.0, ): super().__init__() resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.add_attention = add_attention # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, 
non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] if attention_head_dim is None: logger.warn( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." ) attention_head_dim = in_channels for _ in range(num_layers): if self.add_attention: attentions.append( Attention( in_channels, heads=in_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups if resnet_time_scale_shift == "default" else None, spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) else: attentions.append(None) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states, temb=None): hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if attn is not None: hidden_states = attn(hidden_states, temb=temb) hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads=1, output_scale_factor=1.0, cross_attention_dim=1280, dual_cross_attention=False, use_linear_projection=False, upcast_attention=False, attention_type="default", ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] for _ in range(num_layers): if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = 
nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, ################################################### index: Optional[torch.FloatTensor] = None, came_posfeat: Optional[torch.FloatTensor] = None, ################################################### ) -> torch.FloatTensor: hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs, ) else: hidden_states, attention_map = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, ######################### index=index, came_posfeat = came_posfeat, ############################# )#[0] ########################################## hidden_states = hidden_states[0] ############################################ hidden_states = resnet(hidden_states, temb) return hidden_states, attention_map class UNetMidBlock2DSimpleCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim=1, output_scale_factor=1.0, cross_attention_dim=1280, skip_time_act=False, only_cross_attention=False, cross_attention_norm=None, ): super().__init__() self.has_cross_attention = True self.attention_head_dim = attention_head_dim resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.num_heads = in_channels // self.attention_head_dim # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ] attentions = [] for _ in range(num_layers): processor = (
AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
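The gold completion line above picks one of the two attention processors from this file based on whether the running PyTorch build exposes scaled_dot_product_attention (added in PyTorch 2.0). The same dispatch pattern in isolation, with empty placeholder classes standing in for the real processors:

import torch.nn.functional as F

class AttnAddedKVProcessor:        # pre-2.0 fallback (placeholder body)
    pass

class AttnAddedKVProcessor2_0:     # fused-SDPA variant (placeholder body)
    pass

# Prefer the PyTorch 2.0 fused kernel when it is available.
processor = (
    AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
)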
3
2023-11-27 13:44:01+00:00
16k
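Both cross-attention mid-block forward passes shown above gate the use_reentrant=False keyword on the installed PyTorch version before calling torch.utils.checkpoint.checkpoint. A self-contained sketch of that pattern under the same assumption; the helper name checkpointed is illustrative, not an identifier from the sample:

from typing import Any, Dict

import torch
import torch.utils.checkpoint
import torch.nn as nn
from diffusers.utils import is_torch_version

def checkpointed(block: nn.Module, *inputs: torch.Tensor) -> torch.Tensor:
    # Non-reentrant checkpointing only exists from PyTorch 1.11 onwards,
    # so the kwarg is added conditionally, mirroring the blocks above.
    ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
    return torch.utils.checkpoint.checkpoint(block, *inputs, **ckpt_kwargs)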
zhenzhiwang/intercontrol
eval/eval_controlmdm.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = th.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = th.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = th.add(th.mul(data, std), mean)\n return output\n \n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n \"\"\"\n overrides q_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n\n bs, feat, _, frames = noise.shape\n noise *= 1. #- model_kwargs['y']['inpainting_mask']\n\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def global_joint_bfgs_optimize(self, x, model_kwargs=None):\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.humanml_to_global_joint(x)\n cond_joint = model_kwargs['y']['global_joint']\n mask = model_kwargs['y']['global_joint_mask']\n pred_joint = th.masked_select(pred_joint, mask.bool())\n cond_joint = th.masked_select(cond_joint, mask.bool())\n assert pred_joint.shape == cond_joint.shape, f\"pred_joint: {pred_joint.shape}, cond_joint: {cond_joint.shape}\"\n loss = self.mse_loss(pred_joint, cond_joint)\n return loss\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert pred_joint.shape[1] == 1\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n return pred_joint\n \n def global_joint_position_conditioning(self, x, model_kwargs=None):\n n_joints = 22 if x.shape[1] == 263 else 21\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n #pred_joint.requires_grad = True\n assert pred_joint.shape == model_kwargs['y']['global_joint'].shape == model_kwargs['y']['global_joint_mask'].shape, f\"pred_joint: {pred_joint.shape}, global_joint: {model_kwargs['y']['global_joint'].shape}, global_joint_mask: {model_kwargs['y']['global_joint_mask'].shape}\"\n loss = self.global_joint_condition_loss(pred_joint, model_kwargs['y']['global_joint'], model_kwargs['y']['global_joint_mask'])\n diff_scale = ((pred_joint.clamp(min=1e-4) - model_kwargs['y']['global_joint'].clamp(min=1e-4)).abs() / model_kwargs['y']['global_joint'].clamp(min=1e-4).abs()).mean().item()\n #loss.requires_grad = True\n gradient = th.autograd.grad(loss, x, \n grad_outputs=th.ones_like(loss),\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradient.clone().detach(), loss.item(), diff_scale\n\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n use_posterior=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n #assert 
use_posterior == False\n p_mean_variance_func = self.p_mean_variance_bfgs_posterior if use_posterior else self.p_mean_variance_bfgs_x0\n out = p_mean_variance_func(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n k_first = self.bfgs_times_first,\n k_last = self.bfgs_times_last,\n )\n \n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n \n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n \n def condition_mean_with_grad(self, cond_fn, x_mean, x_var, t, strength, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n with th.enable_grad():\n x_mean = x_mean.clone().detach().requires_grad_(True)\n gradient, loss_value, diff_scale = cond_fn(x_mean, model_kwargs) # p_mean_var[\"mean\"]\n gradient_guidance = - strength * gradient.float() # x_var.clamp(min = 0.01) \n new_mean = (x_mean + gradient_guidance).clone().detach()\n return new_mean, loss_value, gradient_guidance.clone().detach().abs().cpu(), x_mean.clone().detach().abs().cpu(), diff_scale\n\n\n def condition_mean_bfgs(self, x_mean, num_condition, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n \n with th.enable_grad():\n x_mean = x_mean.clone().detach().contiguous().requires_grad_(True)\n def closure():\n lbfgs.zero_grad()\n objective = self.global_joint_bfgs_optimize(x_mean, model_kwargs)\n objective.backward()\n return objective\n lbfgs = optim.LBFGS([x_mean],\n history_size=10, \n max_iter=4, \n line_search_fn=\"strong_wolfe\")\n for _ in range(num_condition):\n lbfgs.step(closure)\n #loss_value = self.global_joint_bfgs_optimize(x_mean, model_kwargs).item()\n return x_mean #, loss_value\n\n def p_mean_variance_bfgs_x0(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_output = self.condition_mean_bfgs(model_output, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n \n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def p_mean_variance_bfgs_posterior(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] 
tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_mean = self.condition_mean_bfgs(model_mean, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def training_losses(self, model, x_start, 
t, model_kwargs=None, noise=None, dataset=None,\n use_posterior = True,\n k_first = 1,\n k_last = 10,\n t_threshold = 10,):\n \"\"\"\n Compute training losses for a single timestep.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param t: a batch of timestep indices.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param noise: if specified, the specific Gaussian noise to try to remove.\n :return: a dict with the key \"loss\" containing a tensor of shape [N].\n Some mean or variance settings may also have other keys.\n \"\"\"\n\n # enc = model.model._modules['module']\n model = self._wrap_model(model)\n \n enc = model.model\n mask = model_kwargs['y']['mask']\n get_xyz = lambda sample: enc.rot2xyz(sample, mask=None, pose_rep=enc.pose_rep, translation=enc.translation,\n glob=enc.glob,\n # jointstype='vertices', # 3.4 iter/sec # USED ALSO IN MotionCLIP\n jointstype='smpl', # 3.4 iter/sec\n vertstrans=False)\n\n if model_kwargs is None:\n model_kwargs = {}\n if noise is None:\n noise = th.randn_like(x_start)\n x_t = self.q_sample(x_start, t, noise=noise, model_kwargs=model_kwargs)\n \n #assert k_first == 1, \"k_first must be 1, {}\".format(k_first)\n #assert k_last == 10, \"k_last must be 10, {}\".format(k_last)\n assert use_posterior == True, \"use_posterior must be True, {}\".format(use_posterior)\n if use_posterior:\n '''\n # loss-guided condition in training time\n if t[0] >= t_threshold:\n assert (t >= t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n num_condition = k_first # else k_last\n else:\n num_condition = k_last\n assert (t < t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n '''\n num_condition = k_first\n x_t = self.condition_mean_bfgs(x_t, num_condition, model_kwargs=model_kwargs)\n\n terms = {}\n if self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:\n model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)\n\n target = {\n ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )[0],\n ModelMeanType.START_X: x_start,\n ModelMeanType.EPSILON: noise,\n }[self.model_mean_type]\n\n assert model_output.shape == target.shape == x_start.shape, \"model_output {}, target {}, x_start {}\".format(model_output.shape ,target.shape ,x_start.shape) # [bs, njoints, nfeats, nframes]\n\n terms[\"rot_mse\"] = self.masked_l2(target, model_output, mask) # mean_flat(rot_mse)\n\n terms[\"loss\"] = terms[\"rot_mse\"] + terms.get('vb', 0.) 
+\\\n (self.lambda_vel * terms.get('vel_mse', 0.)) +\\\n (self.lambda_rcxyz * terms.get('rcxyz_mse', 0.)) + \\\n (self.lambda_fc * terms.get('fc', 0.))\n else:\n raise NotImplementedError(self.loss_type)\n\n return terms" }, { "identifier": "SpacedDiffusion", "path": "diffusion/respace.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "evaluation_inpainting_parser", "path": "utils/parser_util.py", "snippet": "def evaluation_inpainting_parser():\n parser = ArgumentParser()\n # args specified by the user: (all other will be loaded from the model)\n add_base_options(parser)\n add_evaluation_options(parser)\n add_inpainting_options(parser)\n add_interactive_options(parser)\n return parse_and_load_from_model(parser)" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "get_mdm_loader", "path": "data_loaders/humanml/motion_loaders/model_motion_loaders.py", "snippet": "def get_mdm_loader(args, model, diffusion, batch_size, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale, num_unfoldings=0):\n opt = {\n 'name': 'test', # FIXME\n }\n print('Generating %s ...' 
% opt['name'])\n # dataset = CompMDMGeneratedDataset(opt, ground_truth_dataset, ground_truth_dataset.w_vectorizer, mm_num_samples, mm_num_repeats)\n if hasattr(args, \"inpainting_mask\") and args.inpainting_mask == 'global_joint':\n dataset = CompMDMControlGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale)\n elif hasattr(args, \"inpainting_mask\") and args.inpainting_mask != '':\n dataset = CompMDMInpaintingGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale)\n elif num_unfoldings > 1:\n dataset = CompMDMUnfoldingGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale, num_unfoldings)\n else:\n dataset = CompMDMGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale)\n\n mm_dataset = MMGeneratedDataset(opt, dataset, ground_truth_loader.dataset.w_vectorizer)\n\n # NOTE: bs must not be changed! this will cause a bug in R precision calc!\n motion_loader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn, drop_last=True, num_workers=4)\n mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=4)\n\n print('Generated Dataset Loading Completed!!!')\n\n return motion_loader, mm_motion_loader" }, { "identifier": "EvaluatorMDMWrapper", "path": "data_loaders/humanml/networks/evaluator_wrapper.py", "snippet": "class EvaluatorMDMWrapper(object):\n\n def __init__(self, dataset_name, device):\n opt = {\n 'dataset_name': dataset_name,\n 'device': device,\n 'dim_word': 300,\n 'max_motion_length': 196,\n 'dim_pos_ohot': len(POS_enumerator),\n 'dim_motion_hidden': 1024,\n 'max_text_len': 20,\n 'dim_text_hidden': 512,\n 'dim_coemb_hidden': 512,\n 'dim_pose': 263 if dataset_name == 'humanml' else 251,\n 'dim_movement_enc_hidden': 512,\n 'dim_movement_latent': 512,\n 'checkpoints_dir': '.',\n 'unit_length': 4,\n 'foot_contact_entries': 4,\n }\n\n if opt['dataset_name'] == 'babel':\n opt['dim_pose'] = 135\n opt['foot_contact_entries'] = 0\n\n self.text_encoder, self.motion_encoder, self.movement_encoder = build_evaluators(opt)\n self.opt = opt\n self.device = opt['device']\n\n self.text_encoder.to(opt['device'])\n self.motion_encoder.to(opt['device'])\n self.movement_encoder.to(opt['device'])\n\n self.text_encoder.eval()\n self.motion_encoder.eval()\n self.movement_encoder.eval()\n\n # Please note that the results does not following the order of inputs\n def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):\n with torch.no_grad():\n word_embs = word_embs.detach().to(self.device).float()\n pos_ohot = pos_ohot.detach().to(self.device).float()\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motion_wo_foot_contact(motions, self.opt['foot_contact_entries'])).detach()\n m_lens = m_lens // self.opt['unit_length']\n motion_embedding = self.motion_encoder(movements, m_lens)\n\n '''Text Encoding'''\n text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)\n text_embedding = text_embedding[align_idx]\n return text_embedding, motion_embedding\n\n # Please note that the results does not following the order of inputs\n def 
get_motion_embeddings(self, motions, m_lens):\n with torch.no_grad():\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motion_wo_foot_contact(motions, self.opt['foot_contact_entries'])).detach()\n m_lens = m_lens // self.opt['unit_length']\n motion_embedding = self.motion_encoder(movements, m_lens)\n return motion_embedding" }, { "identifier": "load_controlmdm_and_diffusion", "path": "utils/model_util.py", "snippet": "def load_controlmdm_and_diffusion(args, data, device, ModelClass=ControlMDM, DiffusionClass=ControlGaussianDiffusion): \n model, diffusion = create_model_and_diffusion(args, data, ModelClass=ControlMDM, DiffusionClass=DiffusionClass)\n model_path = args.model_path\n print(f\"Loading checkpoints from [{model_path}]...\")\n state_dict = torch.load(model_path, map_location='cpu')\n load_model_wo_clip(model, state_dict)\n model.mean = data.dataset.t2m_dataset.mean\n model.std = data.dataset.t2m_dataset.std\n\n model.to(device)\n model.eval() # disable random masking\n model = wrap_model(model, args)\n return model, diffusion" }, { "identifier": "ControlMDM", "path": "model/ControlMDM.py", "snippet": "class ControlMDM(MDM):\n\n def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n ablation=None, activation=\"gelu\", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512,\n arch='trans_enc', emb_trans_dec=False, clip_version=None, args=None, **kargs):\n\n super(ControlMDM, self).__init__(modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim, ff_size, num_layers, num_heads, dropout,\n ablation, activation, legacy, data_rep, dataset, clip_dim,\n arch, emb_trans_dec, clip_version, **kargs)\n self.args = args\n self.num_layers = num_layers\n self.multi_person = args.multi_person\n self.upper_orientation_index = [0, 16, 17] # root, l_shoulder, r_shoulder\n self.lower_orientation_index = [0, 1, 2] # root, l_hip, r_hip\n\n # linear layers init with zeros\n if self.dataset == 'kit':\n self.first_zero_linear = nn.Linear(21*3*2 + 2*3, self.latent_dim)\n elif self.dataset == 'humanml':\n self.first_zero_linear = nn.Linear(22*3*2 + 2*3, self.latent_dim)\n else:\n raise NotImplementedError('Supporting only kit and humanml dataset, got {}'.format(self.dataset))\n \n nn.init.zeros_(self.first_zero_linear.weight)\n nn.init.zeros_(self.first_zero_linear.bias)\n self.mid_zero_linear = nn.ModuleList(\n [nn.Linear(self.latent_dim, self.latent_dim) for _ in range(self.num_layers)])\n for m in self.mid_zero_linear:\n nn.init.zeros_(m.weight)\n nn.init.zeros_(m.bias)\n\n if self.arch == 'trans_enc':\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n del self.seqTransEncoder\n self.seqTransEncoder_mdm = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n self.seqTransEncoder_control = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n else:\n raise ValueError('Supporting only trans_enc arch.')\n\n self.freeze_block(self.input_process)\n self.freeze_block(self.sequence_pos_encoder)\n self.freeze_block(self.seqTransEncoder_mdm)\n self.freeze_block(self.embed_timestep)\n if 'text' 
in self.cond_mode:\n self.freeze_block(self.embed_text)\n self.freeze_block(self.output_process)\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = torch.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = torch.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = torch.add(torch.mul(data, std), mean)\n return output\n \n def compute_triangle_normals(self, triangles):\n # Compute the vectors from the first point to the other two points\n v1 = triangles[:,:, 1] - triangles[:, :,0]\n v2 = triangles[:,:, 2] - triangles[:,:,0]\n\n # Compute the cross product of v1 and v2 to get the normal vectors\n normals = torch.cross(v2, v1, dim=-1)\n\n # Normalize the normal vectors to unit length\n normals = nn.functional.normalize(normals, dim=-1)\n return normals\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n curr_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert curr_joint.shape[1] == 1\n curr_joint = recover_from_ric(curr_joint, n_joints)\n curr_joint = curr_joint.view(-1, *curr_joint.shape[2:]).permute(0, 2, 3, 1)\n # change root positions for multi-person purpose\n if self.multi_person:\n curr_joint[1::2, :,2,:] *= -1\n curr_joint[1::2, :,0,:] *= -1\n curr_joint[1::2, :,2,:] += 2\n\n # more than 3 people\n #curr_joint[1, :,2,:] *= -1\n #curr_joint[1, :,0,:] *= -1\n #curr_joint[1, :,2,:] += 2\n #curr_joint[2, :,0,:] += 1\n return curr_joint\n\n def forward(self, x, timesteps, y=None):\n bs, njoints, nfeats, seqlen = x.shape\n control_bs, n_global_joints, xyz_dim, control_frames = y['global_joint'].shape\n assert bs == control_bs and seqlen == control_frames, \"bs {} != {} or seqlen {} != {}\".format(bs, control_bs, seqlen, control_frames)\n assert xyz_dim ==3, \"xyz_dim {} != 3\".format(xyz_dim)\n # prepare global joints for controlmdm\n curr_joint = self.humanml_to_global_joint(x).clone().detach() # [bs, njoints, 3, seqlen]\n curr_joint.requires_grad = False\n\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n\n # controlmdm\n # orientation\n upper_triangles = curr_joint[:,self.upper_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n lower_triangles = curr_joint[:,self.lower_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n upper_orientation = self.compute_triangle_normals(upper_triangles) # [seqlen, bs, 3]\n lower_orientation = self.compute_triangle_normals(lower_triangles) # [seqlen, bs, 3]\n\n # relative position to joint\n '''\n relative_position = torch.zeros_like(curr_joint, device = xseq.device, dtype=torch.float32) # [bs, njoints, 3, seqlen]\n relative_position[1::2,:,:,:] = ((y['global_joint'][::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,1::2,:,:].unsqueeze(2))*y['global_joint_mask'][::2,:,:,:].bool().float()).float().sum(1)\n relative_position[::2,:,:,:] = 
((y['global_joint'][1::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,::2,:,:].unsqueeze(2))*y['global_joint_mask'][1::2,:,:,:].bool().float()).float().sum(1)\n '''\n relative_position = ((y['global_joint'].float() - curr_joint)*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_position = relative_position.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n\n # relative position to root\n relative_root = ((y['global_joint'].float() - curr_joint[:,[0],:,:])*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_root = relative_root.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n global_joint_feat = torch.cat((relative_position, relative_root, upper_orientation, lower_orientation), axis=-1) # [seqlen, bs, 22*3 *2 +3 +3]\n \n global_joint_feat = self.first_zero_linear(global_joint_feat) # [seqlen, bs, d]\n control_input = xseq + torch.cat((torch.zeros_like(emb, device = xseq.device, dtype=torch.float32), global_joint_feat), axis=0) # [seqlen+1, bs, d]\n control_output_list = self.seqTransEncoder_control.return_all_layers(control_input) # [seqlen+1, bs, d]\n for i in range(self.num_layers):\n control_output_list[i] = self.mid_zero_linear[i](control_output_list[i])\n \n output = self.seqTransEncoder_mdm.forward_with_condition(xseq, control_output_list)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output\n\n def trainable_parameters(self):\n return [p for name, p in self.named_parameters() if p.requires_grad]\n # return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n \n def trainable_parameter_names(self):\n return [name for name, p in self.named_parameters() if p.requires_grad]\n\n def freeze_block(self, block):\n block.eval()\n for p in block.parameters():\n p.requires_grad = False\n\n def unfreeze_block(self, block):\n block.train()\n for p in block.parameters():\n p.requires_grad = True\n \n def forward_without_control(self, x, timesteps, y=None): #\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n output = self.seqTransEncoder_mdm(xseq)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output" }, { "identifier": "logger", "path": "diffusion/logger.py", "snippet": "DEBUG = 10\nINFO = 20\nWARN = 30\nERROR = 40\nDISABLED = 50\n DEFAULT = None # A logger with no output files. 
(See right below class definition)\n CURRENT = None # Current logger being used by the free functions above\nclass KVWriter(object):\nclass SeqWriter(object):\nclass HumanOutputFormat(KVWriter, SeqWriter):\nclass JSONOutputFormat(KVWriter):\nclass CSVOutputFormat(KVWriter):\nclass TensorBoardOutputFormat(KVWriter):\nclass Logger(object):\n def writekvs(self, kvs):\n def writeseq(self, seq):\n def __init__(self, filename_or_file):\n def writekvs(self, kvs):\n def _truncate(self, s):\n def writeseq(self, seq):\n def close(self):\n def __init__(self, filename):\n def writekvs(self, kvs):\n def close(self):\n def __init__(self, filename):\n def writekvs(self, kvs):\n def close(self):\n def __init__(self, dir):\n def writekvs(self, kvs):\n def summary_val(k, v):\n def close(self):\ndef make_output_format(format, ev_dir, log_suffix=\"\"):\ndef logkv(key, val):\ndef logkv_mean(key, val):\ndef logkvs(d):\ndef dumpkvs():\ndef getkvs():\ndef log(*args, level=INFO):\ndef debug(*args):\ndef info(*args):\ndef warn(*args):\ndef error(*args):\ndef set_level(level):\ndef set_comm(comm):\ndef get_dir():\ndef profile_kv(scopename):\ndef profile(n):\n def decorator_with_name(func):\n def func_wrapper(*args, **kwargs):\ndef get_current():\n def __init__(self, dir, output_formats, comm=None):\n def logkv(self, key, val):\n def logkv_mean(self, key, val):\n def dumpkvs(self):\n def log(self, *args, level=INFO):\n def set_level(self, level):\n def set_comm(self, comm):\n def get_dir(self):\n def close(self):\n def _do_log(self, args):\ndef get_rank_without_mpi_import():\ndef mpi_weighted_mean(comm, local_name2valcount):\ndef configure(dir=None, format_strs=None, comm=None, log_suffix=\"\"):\ndef _configure_default_logger():\ndef reset():\ndef scoped_configure(dir=None, format_strs=None, comm=None):" }, { "identifier": "dist_util", "path": "utils/dist_util.py", "snippet": "GPUS_PER_NODE = 8\nSETUP_RETRY_COUNT = 3\ndef setup_dist(device=0):\ndef dev():\ndef load_state_dict(path, **kwargs):\ndef sync_params(params):\ndef _find_free_port():" }, { "identifier": "get_dataset_loader", "path": "data_loaders/get_data.py", "snippet": "def get_dataset_loader(name, batch_size, num_frames, split='train', load_mode='train', opt=None, short_db=False, cropping_sampler=False, size=None):\n if load_mode == 'text_only':\n load_mode = 'train'\n dataset = get_dataset(name, num_frames, split, load_mode, batch_size, opt, short_db, cropping_sampler, size)\n collate = get_collate_fn(name, load_mode)\n\n n_workers = 1 if load_mode in ['movement_train', 'evaluator_train'] else 8\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True,\n num_workers=n_workers, drop_last=True, collate_fn=collate\n )\n\n return loader" }, { "identifier": "wrap_model", "path": "model/cfg_sampler.py", "snippet": "def wrap_model(model, args):\n if args.guidance_param not in [0., 1.]:\n return ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler\n elif args.guidance_param == 0:\n return UnconditionedModel(model)\n else:\n return model" } ]
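The SpacedDiffusion snippet above keeps only a subset of timesteps and recomputes betas so that the retained steps reproduce the original cumulative alphas (new_beta_i = 1 - alpha_cumprod_i / alpha_cumprod_last). Below is a minimal NumPy sketch of that respacing; the linear base schedule, step count, and stride are illustrative assumptions, not values taken from the repository.

import numpy as np

# Hypothetical base schedule: 1000 linear betas (assumption, not from the repo).
base_betas = np.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = np.cumprod(1.0 - base_betas)

# Keep every 10th timestep, mirroring SpacedDiffusion.use_timesteps.
use_timesteps = set(range(0, 1000, 10))

new_betas, timestep_map = [], []
last_alpha_cumprod = 1.0
for i, ac in enumerate(alphas_cumprod):
    if i in use_timesteps:
        # Same identity as in the snippet: the respaced beta reproduces
        # the original cumulative product at the retained steps.
        new_betas.append(1.0 - ac / last_alpha_cumprod)
        last_alpha_cumprod = ac
        timestep_map.append(i)

new_betas = np.array(new_betas)
# Sanity check: cumulative products agree at the retained timesteps.
assert np.allclose(np.cumprod(1.0 - new_betas), alphas_cumprod[sorted(use_timesteps)])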
from diffusion.control_diffusion import ControlGaussianDiffusion
from diffusion.respace import SpacedDiffusion
from utils.parser_util import evaluation_inpainting_parser
from utils.fixseed import fixseed
from datetime import datetime
from data_loaders.humanml.motion_loaders.model_motion_loaders import get_mdm_loader  # get_motion_loader
from data_loaders.humanml.utils.metrics import *
from data_loaders.humanml.networks.evaluator_wrapper import EvaluatorMDMWrapper
from collections import OrderedDict
from data_loaders.humanml.scripts.motion_process import *
from data_loaders.humanml.utils.utils import *
from utils.model_util import load_controlmdm_and_diffusion
from model.ControlMDM import ControlMDM
from diffusion import logger
from utils import dist_util
from data_loaders.get_data import get_dataset_loader
from model.cfg_sampler import wrap_model
14,366
all_metrics['Skating Ratio'][key] += [item] for key, item in mat_score_dict.items(): if key not in all_metrics['Matching Score']: all_metrics['Matching Score'][key] = [item] else: all_metrics['Matching Score'][key] += [item] for key, item in R_precision_dict.items(): if key not in all_metrics['R_precision']: all_metrics['R_precision'][key] = [item] else: all_metrics['R_precision'][key] += [item] for key, item in fid_score_dict.items(): if key not in all_metrics['FID']: all_metrics['FID'][key] = [item] else: all_metrics['FID'][key] += [item] for key, item in div_score_dict.items(): if key not in all_metrics['Diversity']: all_metrics['Diversity'][key] = [item] else: all_metrics['Diversity'][key] += [item] if run_mm: for key, item in mm_score_dict.items(): if key not in all_metrics['MultiModality']: all_metrics['MultiModality'][key] = [item] else: all_metrics['MultiModality'][key] += [item] # print(all_metrics['Diversity']) mean_dict = {} for metric_name, metric_dict in all_metrics.items(): print('========== %s Summary ==========' % metric_name) print('========== %s Summary ==========' % metric_name, file=f, flush=True) for model_name, values in metric_dict.items(): # print(metric_name, model_name) mean, conf_interval = get_metric_statistics(np.array(values), replication_times) mean_dict[metric_name + '_' + model_name] = mean # print(mean, mean.dtype) if isinstance(mean, np.float64) or isinstance(mean, np.float32): print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}') print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}', file=f, flush=True) elif metric_name == 'Trajectory Error': traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"] line = f'---> [{model_name}]' print(line) print(line, file=f, flush=True) line = '' for i in range(len(mean)): # zip(traj_err_key, mean): line += ' (%s): Mean: %.4f CInt: %.4f; \n' % (traj_err_key[i], mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) elif isinstance(mean, np.ndarray): line = f'---> [{model_name}]' for i in range(len(mean)): line += '(top %d) Mean: %.4f CInt: %.4f;' % (i+1, mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) return mean_dict if __name__ == '__main__': args = evaluation_inpainting_parser() assert args.multi_person == False, 'multi-person is not supported for this script' assert args.guidance_param == 2.5 fixseed(args.seed) args.batch_size = 32 # This must be 32! Don't change it! otherwise it will cause a bug in R precision calc! model_name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') dataset_name = args.dataset #log_file = os.path.join(os.path.dirname(args.model_path), 'eval_{}_{}_{}'.format(dataset_name, model_name, niter)) log_file = os.path.join(os.path.dirname(args.model_path), 'eval_niter_' + str(int(niter)) +'_'+ args.control_joint) assert args.inpainting_mask == 'global_joint', "This script only supports global_joint inpainting!" log_file += f'_mask{args.mask_ratio}' log_file += f'_bfgs_first{args.bfgs_times_first}_last{args.bfgs_times_last}_skip{args.bfgs_interval}' if args.use_posterior: log_file += '_posterior' else: log_file += '_x0' log_file += f'_{args.eval_mode}' log_file += '.log' print(f'Will save to log file [{log_file}]') assert args.overwrite or not os.path.exists(log_file), "Log file already exists!" 
print(f'Eval mode [{args.eval_mode}]') if args.eval_mode == 'debug': num_samples_limit = 1000 # None means no limit (eval over all dataset) run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 5 # about 3 Hrs elif args.eval_mode == 'wo_mm': num_samples_limit = 1000 run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 20 # about 12 Hrs elif args.eval_mode == 'mm_short': num_samples_limit = 1000 run_mm = True mm_num_samples = 100 mm_num_repeats = 30 mm_num_times = 10 diversity_times = 300 replication_times = 5 # about 15 Hrs else: raise ValueError() replication_times = replication_times if args.replication_times is None else args.replication_times dist_util.setup_dist(args.device)
torch.multiprocessing.set_sharing_strategy('file_system') def evaluate_matching_score(eval_wrapper, motion_loaders, file): match_score_dict = OrderedDict({}) R_precision_dict = OrderedDict({}) activation_dict = OrderedDict({}) trajectory_score_dict = OrderedDict({}) skating_ratio_dict = OrderedDict({}) print('========== Evaluating Matching Score ==========') for motion_loader_name, motion_loader in motion_loaders.items(): all_motion_embeddings = [] score_list = [] all_size = 0 matching_score_sum = 0 top_k_count = 0 skate_ratio_sum = 0.0 traj_err = [] traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"] # print(motion_loader_name) with torch.no_grad(): for idx, batch in enumerate(motion_loader): if motion_loader_name == 'ground truth': word_embeddings, pos_one_hots, _, sent_lens, motions, m_lens, _, _ = batch else: assert motion_loader_name == 'vald' # tested method named vald as default word_embeddings, pos_one_hots, _, sent_lens, motions, m_lens, _, skate_ratio, err_np = batch text_embeddings, motion_embeddings = eval_wrapper.get_co_embeddings( word_embs=word_embeddings, pos_ohot=pos_one_hots, cap_lens=sent_lens, motions=motions, m_lens=m_lens) dist_mat = euclidean_distance_matrix(text_embeddings.cpu().numpy(),motion_embeddings.cpu().numpy()) matching_score_sum += dist_mat.trace() argsmax = np.argsort(dist_mat, axis=1) top_k_mat = calculate_top_k(argsmax, top_k=3) top_k_count += top_k_mat.sum(axis=0) all_size += text_embeddings.shape[0] all_motion_embeddings.append(motion_embeddings.cpu().numpy()) if motion_loader_name != 'ground truth': traj_err.append(err_np) skate_ratio_sum += skate_ratio.sum() all_motion_embeddings = np.concatenate(all_motion_embeddings, axis=0) matching_score = matching_score_sum / all_size R_precision = top_k_count / all_size match_score_dict[motion_loader_name] = matching_score R_precision_dict[motion_loader_name] = R_precision activation_dict[motion_loader_name] = all_motion_embeddings if motion_loader_name != 'ground truth': ### For trajecotry evaluation ### traj_err = np.concatenate(traj_err).mean(0) trajectory_score_dict[motion_loader_name] = traj_err line = f'---> [{motion_loader_name}] Traj Error: ' print(line) print(line, file=file, flush=True) line = '' for (k, v) in zip(traj_err_key, traj_err): line += ' (%s): %.4f \n' % (k, np.mean(v)) print(line) print(line, file=file, flush=True) # For skating evaluation skating_score = skate_ratio_sum / all_size skating_ratio_dict[motion_loader_name] = skating_score print(f'---> [{motion_loader_name}] Skating Ratio: {skating_score:.4f}') print(f'---> [{motion_loader_name}] Skating Ratio: {skating_score:.4f}', file=file, flush=True) print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}') print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}',file=file,flush=True) line = f'---> [{motion_loader_name}] R_precision: ' for i in range(len(R_precision)): line += '(top %d): %.4f ' % (i + 1, R_precision[i]) print(line) print(line, file=file, flush=True) return match_score_dict, R_precision_dict, activation_dict, trajectory_score_dict, skating_ratio_dict def evaluate_fid(eval_wrapper, groundtruth_loader, activation_dict, file): eval_dict = OrderedDict({}) gt_motion_embeddings = [] print('========== Evaluating FID ==========') with torch.no_grad(): for idx, batch in enumerate(groundtruth_loader): _, _, _, sent_lens, motions, m_lens, _, _ = batch motion_embeddings = eval_wrapper.get_motion_embeddings( motions=motions, m_lens=m_lens ) 
gt_motion_embeddings.append(motion_embeddings.cpu().numpy()) gt_motion_embeddings = np.concatenate(gt_motion_embeddings, axis=0) gt_mu, gt_cov = calculate_activation_statistics(gt_motion_embeddings) # print(gt_mu) for model_name, motion_embeddings in activation_dict.items(): mu, cov = calculate_activation_statistics(motion_embeddings) # print(mu) fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) print(f'---> [{model_name}] FID: {fid:.4f}') print(f'---> [{model_name}] FID: {fid:.4f}', file=file, flush=True) eval_dict[model_name] = fid return eval_dict def evaluate_diversity(activation_dict, file, diversity_times): eval_dict = OrderedDict({}) print('========== Evaluating Diversity ==========') for model_name, motion_embeddings in activation_dict.items(): diversity = calculate_diversity(motion_embeddings, diversity_times) eval_dict[model_name] = diversity print(f'---> [{model_name}] Diversity: {diversity:.4f}') print(f'---> [{model_name}] Diversity: {diversity:.4f}', file=file, flush=True) return eval_dict def evaluate_multimodality(eval_wrapper, mm_motion_loaders, file, mm_num_times): eval_dict = OrderedDict({}) print('========== Evaluating MultiModality ==========') for model_name, mm_motion_loader in mm_motion_loaders.items(): mm_motion_embeddings = [] with torch.no_grad(): for idx, batch in enumerate(mm_motion_loader): # (1, mm_replications, dim_pos) motions, m_lens = batch motion_embedings = eval_wrapper.get_motion_embeddings(motions[0], m_lens[0]) mm_motion_embeddings.append(motion_embedings.unsqueeze(0)) if len(mm_motion_embeddings) == 0: multimodality = 0 else: mm_motion_embeddings = torch.cat(mm_motion_embeddings, dim=0).cpu().numpy() multimodality = calculate_multimodality(mm_motion_embeddings, mm_num_times) print(f'---> [{model_name}] Multimodality: {multimodality:.4f}') print(f'---> [{model_name}] Multimodality: {multimodality:.4f}', file=file, flush=True) eval_dict[model_name] = multimodality return eval_dict def get_metric_statistics(values, replication_times): mean = np.mean(values, axis=0) std = np.std(values, axis=0) conf_interval = 1.96 * std / np.sqrt(replication_times) return mean, conf_interval def evaluation(eval_wrapper, gt_loader, eval_motion_loaders, log_file, replication_times, diversity_times, mm_num_times, run_mm=False): with open(log_file, 'w') as f: all_metrics = OrderedDict({'Matching Score': OrderedDict({}), 'R_precision': OrderedDict({}), 'FID': OrderedDict({}), 'Diversity': OrderedDict({}), 'MultiModality': OrderedDict({}), 'Trajectory Error': OrderedDict({}), 'Skating Ratio': OrderedDict({}), }) for replication in range(replication_times): motion_loaders = {} mm_motion_loaders = {} for motion_loader_name, motion_loader_getter in eval_motion_loaders.items(): motion_loader, mm_motion_loader = motion_loader_getter() motion_loaders[motion_loader_name] = motion_loader mm_motion_loaders[motion_loader_name] = mm_motion_loader motion_loaders['ground truth'] = gt_loader print(f'==================== Replication {replication} ====================') print(f'==================== Replication {replication} ====================', file=f, flush=True) print(f'Time: {datetime.now()}') print(f'Time: {datetime.now()}', file=f, flush=True) mat_score_dict, R_precision_dict, acti_dict, trajectory_score_dict, skating_ratio_dict = evaluate_matching_score(eval_wrapper, motion_loaders, f) print(f'Time: {datetime.now()}') print(f'Time: {datetime.now()}', file=f, flush=True) fid_score_dict = evaluate_fid(eval_wrapper, gt_loader, acti_dict, f) print(f'Time: {datetime.now()}') 
print(f'Time: {datetime.now()}', file=f, flush=True) div_score_dict = evaluate_diversity(acti_dict, f, diversity_times) if run_mm: print(f'Time: {datetime.now()}') print(f'Time: {datetime.now()}', file=f, flush=True) mm_score_dict = evaluate_multimodality(eval_wrapper, mm_motion_loaders, f, mm_num_times) print(f'!!! DONE !!!') print(f'!!! DONE !!!', file=f, flush=True) for key, item in trajectory_score_dict.items(): if key not in all_metrics['Trajectory Error']: all_metrics['Trajectory Error'][key] = [item] else: all_metrics['Trajectory Error'][key] += [item] for key, item in skating_ratio_dict.items(): if key not in all_metrics['Skating Ratio']: all_metrics['Skating Ratio'][key] = [item] else: all_metrics['Skating Ratio'][key] += [item] for key, item in mat_score_dict.items(): if key not in all_metrics['Matching Score']: all_metrics['Matching Score'][key] = [item] else: all_metrics['Matching Score'][key] += [item] for key, item in R_precision_dict.items(): if key not in all_metrics['R_precision']: all_metrics['R_precision'][key] = [item] else: all_metrics['R_precision'][key] += [item] for key, item in fid_score_dict.items(): if key not in all_metrics['FID']: all_metrics['FID'][key] = [item] else: all_metrics['FID'][key] += [item] for key, item in div_score_dict.items(): if key not in all_metrics['Diversity']: all_metrics['Diversity'][key] = [item] else: all_metrics['Diversity'][key] += [item] if run_mm: for key, item in mm_score_dict.items(): if key not in all_metrics['MultiModality']: all_metrics['MultiModality'][key] = [item] else: all_metrics['MultiModality'][key] += [item] # print(all_metrics['Diversity']) mean_dict = {} for metric_name, metric_dict in all_metrics.items(): print('========== %s Summary ==========' % metric_name) print('========== %s Summary ==========' % metric_name, file=f, flush=True) for model_name, values in metric_dict.items(): # print(metric_name, model_name) mean, conf_interval = get_metric_statistics(np.array(values), replication_times) mean_dict[metric_name + '_' + model_name] = mean # print(mean, mean.dtype) if isinstance(mean, np.float64) or isinstance(mean, np.float32): print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}') print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}', file=f, flush=True) elif metric_name == 'Trajectory Error': traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"] line = f'---> [{model_name}]' print(line) print(line, file=f, flush=True) line = '' for i in range(len(mean)): # zip(traj_err_key, mean): line += ' (%s): Mean: %.4f CInt: %.4f; \n' % (traj_err_key[i], mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) elif isinstance(mean, np.ndarray): line = f'---> [{model_name}]' for i in range(len(mean)): line += '(top %d) Mean: %.4f CInt: %.4f;' % (i+1, mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) return mean_dict if __name__ == '__main__': args = evaluation_inpainting_parser() assert args.multi_person == False, 'multi-person is not supported for this script' assert args.guidance_param == 2.5 fixseed(args.seed) args.batch_size = 32 # This must be 32! Don't change it! otherwise it will cause a bug in R precision calc! 
model_name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') dataset_name = args.dataset #log_file = os.path.join(os.path.dirname(args.model_path), 'eval_{}_{}_{}'.format(dataset_name, model_name, niter)) log_file = os.path.join(os.path.dirname(args.model_path), 'eval_niter_' + str(int(niter)) +'_'+ args.control_joint) assert args.inpainting_mask == 'global_joint', "This script only supports global_joint inpainting!" log_file += f'_mask{args.mask_ratio}' log_file += f'_bfgs_first{args.bfgs_times_first}_last{args.bfgs_times_last}_skip{args.bfgs_interval}' if args.use_posterior: log_file += '_posterior' else: log_file += '_x0' log_file += f'_{args.eval_mode}' log_file += '.log' print(f'Will save to log file [{log_file}]') assert args.overwrite or not os.path.exists(log_file), "Log file already exists!" print(f'Eval mode [{args.eval_mode}]') if args.eval_mode == 'debug': num_samples_limit = 1000 # None means no limit (eval over all dataset) run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 5 # about 3 Hrs elif args.eval_mode == 'wo_mm': num_samples_limit = 1000 run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 20 # about 12 Hrs elif args.eval_mode == 'mm_short': num_samples_limit = 1000 run_mm = True mm_num_samples = 100 mm_num_repeats = 30 mm_num_times = 10 diversity_times = 300 replication_times = 5 # about 15 Hrs else: raise ValueError() replication_times = replication_times if args.replication_times is None else args.replication_times dist_util.setup_dist(args.device)
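In the evaluation code above, evaluate_matching_score derives the matching score from the trace of the text-to-motion distance matrix and R-precision from top-k retrieval; euclidean_distance_matrix and calculate_top_k are imported helpers whose exact code is not shown here, so the sketch below re-implements the idea with plain NumPy. The function names, the random toy embeddings, and the perturbation scale are my own assumptions.

import numpy as np

def pairwise_euclidean(a, b):
    # [N, D] x [N, D] -> [N, N] distance matrix (rows: text, cols: motion)
    return np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)

def matching_and_r_precision(text_emb, motion_emb, top_k=3):
    dist = pairwise_euclidean(text_emb, motion_emb)
    matching_score = dist.trace() / len(dist)           # mean distance of paired samples
    ranks = np.argsort(dist, axis=1)                    # closest motion first
    gt = np.arange(len(dist))[:, None]
    hits = (ranks[:, :top_k] == gt)                     # rank position of the paired motion
    r_precision = hits.cumsum(axis=1).clip(max=1).mean(axis=0)  # top-1 ... top-k accuracy
    return matching_score, r_precision

# Toy usage with a batch of 32 paired 512-d embeddings (values are made up).
rng = np.random.default_rng(0)
text = rng.normal(size=(32, 512))
motion = text + 0.1 * rng.normal(size=(32, 512))
score, r_prec = matching_and_r_precision(text, motion)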
logger.configure()
8
2023-11-27 05:28:02+00:00
16k
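The evaluate_fid function in the script above fits a Gaussian to ground-truth and generated motion embeddings and compares them with the Fréchet distance; calculate_activation_statistics and calculate_frechet_distance come from the imported metrics module, so the following is only a generic sketch of that statistic using the standard formula, not the repository's exact implementation. The epsilon regularization value is an assumption.

import numpy as np
from scipy import linalg

def activation_statistics(feats):
    # feats: [N, D] embedding matrix -> mean vector and covariance matrix
    return feats.mean(axis=0), np.cov(feats, rowvar=False)

def frechet_distance(mu1, cov1, mu2, cov2, eps=1e-6):
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(cov1 @ cov2, disp=False)
    if not np.isfinite(covmean).all():
        # numerical fallback: regularize the covariances before the matrix square root
        offset = np.eye(cov1.shape[0]) * eps
        covmean, _ = linalg.sqrtm((cov1 + offset) @ (cov2 + offset), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return diff @ diff + np.trace(cov1) + np.trace(cov2) - 2.0 * np.trace(covmean)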
moonbow721/DPoser
run/motion_denoising.py
[ { "identifier": "save_obj", "path": "lib/body_model/visual.py", "snippet": "def save_obj(v, f, file_name='output.obj'):\n obj_file = open(file_name, 'w')\n for i in range(len(v)):\n obj_file.write('v ' + str(v[i][0]) + ' ' + str(v[i][1]) + ' ' + str(v[i][2]) + '\\n')\n for i in range(len(f)):\n obj_file.write('f ' + str(f[i][0] + 1) + '/' + str(f[i][0] + 1) + ' ' + str(f[i][1] + 1) + '/' + str(\n f[i][1] + 1) + ' ' + str(f[i][2] + 1) + '/' + str(f[i][2] + 1) + '\\n')\n obj_file.close()" }, { "identifier": "render_mesh", "path": "lib/body_model/visual.py", "snippet": "def render_mesh(img, mesh, face, cam_param, view='random'):\n # mesh\n mesh = trimesh.Trimesh(mesh, face)\n\n centroid = np.mean(mesh.vertices, axis=0)\n translation_to_origin = trimesh.transformations.translation_matrix(-centroid)\n mesh.apply_transform(translation_to_origin)\n\n if view == 'random':\n options_side = ['half', '']\n options_direction = ['left', 'right', 'front', 'back']\n options_height = ['above', 'bottom', '']\n\n chosen_side = random.choice(options_side)\n chosen_direction = random.choice(options_direction)\n chosen_height = random.choice(options_height)\n\n view = '_'.join([opt for opt in [chosen_side, chosen_direction, chosen_height] if opt])\n\n if 'half' in view:\n side_angle = 45\n else:\n side_angle = 90\n\n if 'left' in view:\n angle = np.radians(-side_angle)\n elif 'right' in view:\n angle = np.radians(side_angle)\n elif 'back' in view:\n angle = np.radians(180)\n else: # front\n angle = np.radians(0)\n axis = [0, 1, 0]\n rotation = trimesh.transformations.rotation_matrix(angle, axis)\n mesh.apply_transform(rotation)\n\n if 'above' in view:\n angle = np.radians(30)\n elif 'bottom' in view:\n angle = np.radians(-30)\n else: # nothing\n angle = np.radians(0)\n axis = [1, 0, 0]\n rotation = trimesh.transformations.rotation_matrix(angle, axis)\n mesh.apply_transform(rotation)\n\n translation_to_centroid = trimesh.transformations.translation_matrix(centroid)\n mesh.apply_transform(translation_to_centroid)\n\n mesh.vertices[:, 2] -= 7\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.0, alphaMode='OPAQUE',\n # baseColorFactor=(1.0, 1.0, 0.9, 1.0),\n baseColorFactor=(0.93, 0.6, 0.4, 1.0),\n )\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material, smooth=False)\n scene = pyrender.Scene(ambient_light=(0.3, 0.3, 0.3))\n scene.add(mesh, 'mesh')\n\n focal, princpt = cam_param['focal'], cam_param['princpt']\n camera = pyrender.IntrinsicsCamera(fx=focal[0], fy=focal[1], cx=princpt[0], cy=princpt[1])\n scene.add(camera)\n\n # renderer\n renderer = pyrender.OffscreenRenderer(viewport_width=img.shape[1], viewport_height=img.shape[0], point_size=1.0)\n\n # light\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=0.8)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n\n # render\n rgb, depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n rgb = rgb[:, :, :3].astype(np.float32)\n valid_mask = (depth > 0)[:, :, None]\n\n # save to image\n render_img = rgb * valid_mask + img * (1 - valid_mask)\n return render_img" }, { "identifier": "faster_render", "path": "lib/body_model/visual.py", "snippet": "def faster_render(vertices, faces, target_path, img_name, device, idx_map=None):\n os.makedirs(target_path, exist_ok=True)\n R, T = look_at_view_transform(2.0, 0, 0)\n 
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n\n raster_settings = RasterizationSettings(\n image_size=256,\n blur_radius=0.0,\n faces_per_pixel=1,\n )\n\n lights = PointLights(device=device, location=[[0.0, 0.0, 3.0]])\n\n renderer = MeshRenderer(\n rasterizer=MeshRasterizer(\n cameras=cameras,\n raster_settings=raster_settings\n ),\n shader=SoftPhongShader(\n device=device,\n cameras=cameras,\n lights=lights\n )\n )\n # create mesh from vertices\n verts_rgb = torch.ones_like(vertices) # (1, V, 3)\n textures = TexturesVertex(verts_features=verts_rgb.to(device))\n\n meshes = Meshes(vertices, faces.unsqueeze(0).repeat(len(vertices), 1, 1), textures=textures)\n images = renderer(meshes)\n\n for idx in range(len(vertices)):\n save_idx = idx if idx_map is None else idx_map[idx]\n cv2.imwrite(os.path.join(target_path, img_name.format(save_idx + 1)),\n cv2.cvtColor(images[idx, ..., :3].detach().cpu().numpy() * 255, cv2.COLOR_RGB2BGR))" }, { "identifier": "vis_skeletons", "path": "lib/body_model/visual.py", "snippet": "def vis_skeletons(joints_3d, output_path):\n rotation_angle_x = np.pi # 180 degrees rotation around X-axis\n rotation_matrix_x = get_rotation_matrix_x(rotation_angle_x)\n joints_3d = rotate_points(joints_3d, rotation_matrix_x)\n\n kpt_3d_vis = np.ones((22, 1))\n kps_lines = get_smpl_skeleton()\n\n # Check the dimensions of the joints_data\n if len(joints_3d.shape) == 2:\n visualize_3d_skeleton(joints_3d, kpt_3d_vis, kps_lines, output_path=output_path)\n elif len(joints_3d.shape) == 3:\n visualize_skeleton_sequence(joints_3d, kpt_3d_vis, kps_lines, output_path)" }, { "identifier": "sde_lib", "path": "lib/algorithms/advanced/sde_lib.py", "snippet": "class SDE(abc.ABC):\n class RSDE(self.__class__):\nclass VPSDE(SDE):\nclass subVPSDE(SDE):\nclass VESDE(SDE):\n def __init__(self, N):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def reverse(self, score_fn, probability_flow=False):\n def __init__(self):\n def T(self):\n def sde(self, x, t, condition=None, mask=None, guide=False):\n def discretize(self, x, t, condition=None, mask=None):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def return_alpha_sigma(self, t):\n def __init__(self, sigma_min=0.01, sigma_max=50, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))\n N = self.N\n T = self.T\n N = np.prod(shape[1:])\n G = sqrt_beta\n N = np.prod(shape[1:])\n N = np.prod(shape[1:])\n G = torch.sqrt(sigma ** 2 - adjacent_sigma ** 2)" }, { "identifier": "sampling", "path": "lib/algorithms/advanced/sampling.py", "snippet": "_CORRECTORS = {}\n_PREDICTORS = {}\ndef register_predictor(cls=None, *, name=None):\n def _register(cls):\ndef register_corrector(cls=None, *, name=None):\n def _register(cls):\ndef get_predictor(name):\ndef get_corrector(name):\ndef 
get_sampling_fn(config, sde, shape, inverse_scaler, eps, device=None):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def update_fn_guide(self, x_t, t, observation, mask, condition=None, grad_step=1.0):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def vesde_update_fn(self, x, t):\n def vpsde_update_fn(self, x, t):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\ndef shared_predictor_update_fn(x, t, observation, mask, sde, model, predictor, probability_flow, continuous):\ndef shared_corrector_update_fn(x, t, observation, mask, sde, model, corrector, continuous, snr, n_steps):\ndef get_pc_sampler(sde, shape, predictor, corrector, inverse_scaler, snr,\n n_steps=1, probability_flow=False, continuous=False,\n denoise=True, eps=1e-3, device='cuda'):\n def get_imputation_update_fn(update_fn):\n def imputation_update_fn(x, vec_t, observation, mask, model, args):\n def pc_sampler(model, observation=None, mask=None, z=None, start_step=0, args=None):\ndef get_ode_sampler(sde, shape, inverse_scaler,\n denoise=False, rtol=1e-5, atol=1e-5,\n method='RK45', eps=1e-3, device='cuda'):\n def denoise_update_fn(model, x):\n def drift_fn(model, x, t):\n def ode_sampler(model, z=None):\n def ode_func(t, x):\nclass Predictor(abc.ABC):\nclass Corrector(abc.ABC):\nclass EulerMaruyamaPredictor(Predictor):\nclass ReverseDiffusionPredictor(Predictor):\nclass AncestralSamplingPredictor(Predictor):\nclass NonePredictor(Predictor):\nclass LangevinCorrector(Corrector):\nclass AnnealedLangevinDynamics(Corrector):\nclass NoneCorrector(Corrector):" }, { "identifier": "utils", "path": "lib/algorithms/advanced/utils.py", "snippet": "_MODELS = {}\ndef register_model(cls=None, *, name=None):\n def _register(cls):\ndef get_model(name):\ndef get_sigmas(config):\ndef get_ddpm_params(config):\ndef create_model(config):\ndef get_model_fn(model, train=False):\n def model_fn(x, labels, condition, mask):\ndef get_score_fn(sde, model, train=False, continuous=False):\n def score_fn(x, t, condition, mask):\n def score_fn(x, t, condition, mask):\ndef to_flattened_numpy(x):\ndef from_flattened_numpy(x, shape):" }, { "identifier": "ScoreModelFC", "path": "lib/algorithms/advanced/model.py", "snippet": "class ScoreModelFC(nn.Module):\n \"\"\"\n Independent condition feature projection layers for each block\n \"\"\"\n\n def __init__(self, config, n_poses=21, pose_dim=6, hidden_dim=64,\n embed_dim=32, n_blocks=2):\n super(ScoreModelFC, self).__init__()\n\n self.config = config\n self.n_poses = n_poses\n self.joint_dim = pose_dim\n self.n_blocks = n_blocks\n\n self.act = get_act(config)\n\n self.pre_dense = nn.Linear(n_poses * pose_dim, hidden_dim)\n self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)\n self.pre_dense_cond = nn.Linear(hidden_dim, hidden_dim)\n self.pre_gnorm = nn.GroupNorm(32, 
num_channels=hidden_dim)\n self.dropout = nn.Dropout(p=config.model.dropout)\n\n # time embedding\n self.time_embedding_type = config.model.embedding_type.lower()\n if self.time_embedding_type == 'fourier':\n self.gauss_proj = GaussianFourierProjection(embed_dim=embed_dim, scale=config.model.fourier_scale)\n elif self.time_embedding_type == 'positional':\n self.posit_proj = functools.partial(get_timestep_embedding, embedding_dim=embed_dim)\n else:\n assert 0\n\n self.shared_time_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n self.act,\n )\n self.register_buffer('sigmas', torch.tensor(get_sigmas(config), dtype=torch.float))\n\n for idx in range(n_blocks):\n setattr(self, f'b{idx + 1}_dense1', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_dense1_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))\n\n setattr(self, f'b{idx + 1}_dense2', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_dense2_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))\n\n self.post_dense = nn.Linear(hidden_dim, n_poses * pose_dim)\n\n def forward(self, batch, t, condition=None, mask=None):\n \"\"\"\n batch: [B, j*3] or [B, j*6]\n t: [B]\n Return: [B, j*3] or [B, j*6] same dim as batch\n \"\"\"\n bs = batch.shape[0]\n\n # batch = batch.view(bs, -1) # [B, j*3]\n\n # time embedding\n if self.time_embedding_type == 'fourier':\n # Gaussian Fourier features embeddings.\n used_sigmas = t\n temb = self.gauss_proj(torch.log(used_sigmas))\n elif self.time_embedding_type == 'positional':\n # Sinusoidal positional embeddings.\n timesteps = t\n used_sigmas = self.sigmas[t.long()]\n temb = self.posit_proj(timesteps)\n else:\n raise ValueError(f'time embedding type {self.time_embedding_type} unknown.')\n\n temb = self.shared_time_embed(temb)\n\n h = self.pre_dense(batch)\n h += self.pre_dense_t(temb)\n h = self.pre_gnorm(h)\n h = self.act(h)\n h = self.dropout(h)\n\n for idx in range(self.n_blocks):\n h1 = getattr(self, f'b{idx + 1}_dense1')(h)\n h1 += getattr(self, f'b{idx + 1}_dense1_t')(temb)\n h1 = getattr(self, f'b{idx + 1}_gnorm1')(h1)\n h1 = self.act(h1)\n # dropout, maybe\n h1 = self.dropout(h1)\n\n h2 = getattr(self, f'b{idx + 1}_dense2')(h1)\n h2 += getattr(self, f'b{idx + 1}_dense2_t')(temb)\n h2 = getattr(self, f'b{idx + 1}_gnorm2')(h2)\n h2 = self.act(h2)\n # dropout, maybe\n h2 = self.dropout(h2)\n\n h = h + h2\n\n res = self.post_dense(h) # [B, j*3]\n\n ''' normalize the output '''\n if self.config.model.scale_by_sigma:\n used_sigmas = used_sigmas.reshape((bs, 1))\n res = res / used_sigmas\n\n return res" }, { "identifier": "ExponentialMovingAverage", "path": "lib/algorithms/ema.py", "snippet": "class ExponentialMovingAverage:\n \"\"\"\n Maintains (exponential) moving average of a set of parameters.\n \"\"\"\n\n def __init__(self, parameters, decay=0.999, use_num_updates=True):\n \"\"\"\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the result of\n `model.parameters()`.\n decay: The exponential decay.\n use_num_updates: Whether to use number of updates when computing\n averages.\n \"\"\"\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n self.decay = decay\n self.num_updates = 0 if use_num_updates else None\n self.shadow_params = [p.clone().detach()\n for p in parameters if p.requires_grad]\n self.collected_params = []\n\n def update(self, parameters):\n \"\"\"\n Update currently 
maintained parameters.\n\n Call this every time the parameters are updated, such as the result of\n the `optimizer.step()` call.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the same set of\n parameters used to initialize this object.\n \"\"\"\n decay = self.decay\n if self.num_updates is not None:\n self.num_updates += 1\n decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))\n one_minus_decay = 1.0 - decay\n with torch.no_grad():\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n s_param.sub_(one_minus_decay * (s_param - param))\n\n def copy_to(self, parameters):\n \"\"\"\n Copy current parameters into given collection of parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored moving averages.\n \"\"\"\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n if param.requires_grad:\n param.data.copy_(s_param.data)\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n\n def state_dict(self):\n return dict(decay=self.decay, num_updates=self.num_updates,\n shadow_params=self.shadow_params)\n\n def load_state_dict(self, state_dict):\n self.decay = state_dict['decay']\n self.num_updates = state_dict['num_updates']\n self.shadow_params = state_dict['shadow_params']" }, { "identifier": "BodyModel", "path": "lib/body_model/body_model.py", "snippet": "class BodyModel(nn.Module):\r\n '''\r\n Wrapper around SMPLX body model class.\r\n from https://github.com/davrempe/humor/blob/main/humor/body_model/body_model.py\r\n '''\r\n\r\n def __init__(self,\r\n bm_path,\r\n num_betas=10,\r\n batch_size=1,\r\n num_expressions=10,\r\n model_type='smplx'):\r\n super(BodyModel, self).__init__()\r\n '''\r\n Creates the body model object at the given path.\r\n\r\n :param bm_path: path to the body model pkl file\r\n :param num_expressions: only for smplx\r\n :param model_type: one of [smpl, smplh, smplx]\r\n :param use_vtx_selector: if true, returns additional vertices as joints that correspond to OpenPose joints\r\n '''\r\n\r\n kwargs = {\r\n 'model_type': model_type,\r\n 'num_betas': num_betas,\r\n 'batch_size': batch_size,\r\n 'num_expression_coeffs': num_expressions,\r\n 'use_pca': False,\r\n 'flat_hand_mean': True\r\n }\r\n\r\n assert (model_type in ['smpl', 'smplh', 'smplx'])\r\n if model_type == 'smpl':\r\n self.bm = SMPL(bm_path, **kwargs)\r\n self.num_joints = SMPL.NUM_JOINTS\r\n elif model_type == 'smplh':\r\n # smplx does not support .npz by default, so have to load in manually\r\n smpl_dict = np.load(bm_path, encoding='latin1')\r\n data_struct = Struct(**smpl_dict)\r\n # print(smpl_dict.files)\r\n if model_type 
== 'smplh':\r\n data_struct.hands_componentsl = np.zeros((0))\r\n data_struct.hands_componentsr = np.zeros((0))\r\n data_struct.hands_meanl = np.zeros((15 * 3))\r\n data_struct.hands_meanr = np.zeros((15 * 3))\r\n V, D, B = data_struct.shapedirs.shape\r\n data_struct.shapedirs = np.concatenate(\r\n [data_struct.shapedirs, np.zeros((V, D, SMPL.SHAPE_SPACE_DIM - B))],\r\n axis=-1) # super hacky way to let smplh use 16-size beta\r\n kwargs['data_struct'] = data_struct\r\n self.bm = SMPLH(bm_path, **kwargs)\r\n self.num_joints = SMPLH.NUM_JOINTS\r\n elif model_type == 'smplx':\r\n self.bm = SMPLX(bm_path, **kwargs)\r\n self.num_joints = SMPLX.NUM_JOINTS\r\n\r\n self.model_type = model_type\r\n self.J_regressor = self.bm.J_regressor.numpy()\r\n self.J_regressor_idx = {'pelvis': 0, 'lwrist': 20, 'rwrist': 21, 'neck': 12}\r\n\r\n def forward(self, root_orient=None, pose_body=None, pose_hand=None, pose_jaw=None, pose_eye=None, betas=None,\r\n trans=None, dmpls=None, expression=None, return_dict=False, **kwargs):\r\n '''\r\n Note dmpls are not supported.\r\n '''\r\n assert (dmpls is None)\r\n # parameters of SMPL should not be updated\r\n out_obj = self.bm(\r\n betas=betas,\r\n global_orient=root_orient,\r\n body_pose=pose_body,\r\n left_hand_pose=None if pose_hand is None else pose_hand[:, :(SMPLH.NUM_HAND_JOINTS * 3)],\r\n right_hand_pose=None if pose_hand is None else pose_hand[:, (SMPLH.NUM_HAND_JOINTS * 3):],\r\n transl=trans,\r\n expression=expression,\r\n jaw_pose=pose_jaw,\r\n leye_pose=None if pose_eye is None else pose_eye[:, :3],\r\n reye_pose=None if pose_eye is None else pose_eye[:, 3:],\r\n return_full_pose=True,\r\n **kwargs\r\n )\r\n\r\n out = {\r\n 'v': out_obj.vertices,\r\n 'f': self.bm.faces_tensor,\r\n 'betas': out_obj.betas,\r\n 'Jtr': out_obj.joints,\r\n 'body_joints': out_obj.joints[:22], # only body joints\r\n 'pose_body': out_obj.body_pose,\r\n 'full_pose': out_obj.full_pose\r\n }\r\n if self.model_type in ['smplh', 'smplx']:\r\n out['pose_hand'] = torch.cat([out_obj.left_hand_pose, out_obj.right_hand_pose], dim=-1)\r\n if self.model_type == 'smplx':\r\n out['pose_jaw'] = out_obj.jaw_pose\r\n out['pose_eye'] = pose_eye\r\n\r\n # if not self.use_vtx_selector:\r\n # # don't need extra joints\r\n # out['Jtr'] = out['Jtr'][:, :self.num_joints + 1] # add one for the root\r\n\r\n if not return_dict:\r\n out = Struct(**out)\r\n\r\n return out\r" }, { "identifier": "linear_interpolation", "path": "lib/utils/misc.py", "snippet": "def linear_interpolation(A, B, frames):\r\n alpha = torch.linspace(0, 1, frames, device=A.device)[:, None]\r\n interpolated = (1 - alpha) * A + alpha * B\r\n return interpolated\r" }, { "identifier": "gaussian_smoothing", "path": "lib/utils/misc.py", "snippet": "def gaussian_smoothing(data, window_size, sigma):\r\n kernel = torch.arange(window_size).float() - window_size // 2\r\n kernel = torch.exp(-0.5 * (kernel / sigma) ** 2)\r\n kernel /= kernel.sum()\r\n\r\n kernel = kernel.unsqueeze(0).unsqueeze(0).to(data.device)\r\n data = data.transpose(0, 1).unsqueeze(1)\r\n\r\n smoothed_data = F.conv1d(data, kernel, padding=window_size//2)\r\n\r\n smoothed_data = smoothed_data.squeeze(1).transpose(0, 1)\r\n return smoothed_data" }, { "identifier": "seq_to_video", "path": "lib/utils/motion_video.py", "snippet": "def seq_to_video(img_folder_path, output_merge_folder, video_path):\r\n img_number = len(os.listdir(img_folder_path))\r\n\r\n if not os.path.exists(output_merge_folder):\r\n os.makedirs(output_merge_folder)\r\n\r\n def add_title(img, title_text, 
position=None, blank_height=30, font_scale=0.9):\r\n h, w = img.shape[:2]\r\n blank = 255 * np.ones((blank_height, w, 3), dtype=np.uint8)\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n thickness = 2\r\n text_size = cv2.getTextSize(title_text, font, font_scale, thickness)[0]\r\n\r\n if position is None:\r\n x = (w - text_size[0]) // 2\r\n y = (blank_height + text_size[1]) // 2 - 5\r\n else:\r\n x, y = position\r\n\r\n cv2.putText(blank, title_text, (x, y), font, font_scale, (0, 0, 0), thickness, cv2.LINE_AA)\r\n return np.vstack((img, blank))\r\n\r\n for i in range(img_number // 3):\r\n frame_name = os.path.join(img_folder_path, \"frame_{:04d}.png\".format(i))\r\n out_name = os.path.join(img_folder_path, \"out_{:04d}.png\".format(i))\r\n gt_name = os.path.join(img_folder_path, \"gt_{:04d}.png\".format(i))\r\n\r\n joint_img = process_joint(frame_name)\r\n out_img = process_body(out_name)\r\n gt_img = process_body(gt_name)\r\n\r\n # add titles\r\n joint_img = add_title(joint_img, \"Noisy Joints\")\r\n out_img = add_title(out_img, \"DPoser(Ours)\")\r\n gt_img = add_title(gt_img, \"GT\")\r\n\r\n # concat\r\n merged_img = np.hstack((joint_img, out_img, gt_img))\r\n cv2.imwrite(os.path.join(output_merge_folder, \"merge_{:04d}.png\".format(i)), merged_img)\r\n\r\n # seq -> video\r\n images_to_video(output_merge_folder, video_path)\r" }, { "identifier": "Posenormalizer", "path": "lib/dataset/AMASS.py", "snippet": "class Posenormalizer:\r\n def __init__(self, data_path, device='cuda:0', normalize=True, min_max=True, rot_rep=None):\r\n assert rot_rep in ['rot6d', 'axis']\r\n self.normalize = normalize\r\n self.min_max = min_max\r\n self.rot_rep = rot_rep\r\n normalize_params = torch.load(os.path.join(data_path, '{}_normalize1.pt'.format(rot_rep)))\r\n self.min_poses, self.max_poses = normalize_params['min_poses'].to(device), normalize_params['max_poses'].to(device)\r\n normalize_params = torch.load(os.path.join(data_path, '{}_normalize2.pt'.format(rot_rep)))\r\n self.mean_poses, self.std_poses = normalize_params['mean_poses'].to(device), normalize_params['std_poses'].to(device)\r\n\r\n def offline_normalize(self, poses, from_axis=False):\r\n assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n pose_shape = poses.shape\r\n if from_axis and self.rot_rep == 'rot6d':\r\n poses = axis_angle_to_rot6d(poses.reshape(-1, 3)).reshape(*pose_shape[:-1], -1)\r\n\r\n if not self.normalize:\r\n return poses\r\n\r\n if self.min_max:\r\n min_poses = self.min_poses.view(1, -1)\r\n max_poses = self.max_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = max_poses.unsqueeze(0)\r\n\r\n normalized_poses = 2 * (poses - min_poses) / (max_poses - min_poses) - 1\r\n\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1)\r\n std_poses = self.std_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n normalized_poses = (poses - mean_poses) / std_poses\r\n\r\n return normalized_poses\r\n\r\n def offline_denormalize(self, poses, to_axis=False):\r\n assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n\r\n if not self.normalize:\r\n denormalized_poses = poses\r\n else:\r\n if self.min_max:\r\n min_poses = self.min_poses.view(1, -1)\r\n max_poses = self.max_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = 
max_poses.unsqueeze(0)\r\n\r\n denormalized_poses = 0.5 * ((poses + 1) * (max_poses - min_poses) + 2 * min_poses)\r\n\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1)\r\n std_poses = self.std_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n denormalized_poses = poses * std_poses + mean_poses\r\n\r\n if to_axis and self.rot_rep == 'rot6d':\r\n pose_shape = denormalized_poses.shape\r\n denormalized_poses = rot6d_to_axis_angle(denormalized_poses.reshape(-1, 6)).reshape(*pose_shape[:-1], -1)\r\n\r\n return denormalized_poses\r" }, { "identifier": "N_POSES", "path": "lib/dataset/AMASS.py", "snippet": "N_POSES = 21\r" } ]
import csv
import os
import cv2
import math
import numpy as np
import torch
import torch.nn as nn
from absl import flags, app
from absl.flags import argparse_flags
from ml_collections.config_flags import config_flags
from tqdm import tqdm
from lib.body_model.visual import save_obj, render_mesh, faster_render, vis_skeletons
from lib.algorithms.advanced import sde_lib, sampling
from lib.algorithms.advanced import utils as mutils
from lib.algorithms.advanced.model import ScoreModelFC
from lib.algorithms.ema import ExponentialMovingAverage
from lib.body_model.body_model import BodyModel
from lib.utils.misc import linear_interpolation, gaussian_smoothing
from lib.utils.motion_video import seq_to_video
from lib.dataset.AMASS import Posenormalizer, N_POSES
11,826
quan_t = torch.randint(self.sde.N, [1]) elif time_strategy == '2': quan_t = torch.tensor(sample_time) elif time_strategy == '3': quan_t = self.sde.N - math.floor(torch.tensor(total_steps - step - 1) * (self.sde.N / (sample_trun * total_steps))) - 2 else: raise NotImplementedError('unsupported time sampling strategy') t = timesteps[quan_t] vec_t = torch.ones(self.batch_size, device=self.device) * t loss_dict['dposer'] = self.DPoser_loss(poses, vec_t, quan_t) ''' *********** DPoser loss ************ ''' # calculate temporal loss between mesh vertices smpl_init = self.body_model(betas=smpl_init.betas, pose_body=smpl_init.pose_body) temp_term = smpl_init.v[:-1] - smpl_init.v[1:] loss_dict['temp'] = torch.mean(torch.sqrt(torch.sum(temp_term * temp_term, dim=2))) # calculate data term from inital noisy pose data_term = smpl_init.Jtr[:, :22] - init_joints data_term = torch.mean(torch.sqrt(torch.sum(data_term * data_term, dim=2))) if data_term > 0: # for nans loss_dict['data'] = data_term # Get total loss for backward pass tot_loss = self.backward_step(loss_dict, weight_dict, it) tot_loss.backward() optimizer.step() if verbose: # only for check joint_error = smpl_init.Jtr[:, :22] - smpl_gt.Jtr[:, :22] joint_error = torch.mean(torch.sqrt(torch.sum(joint_error * joint_error, dim=2))) * 100. l_str = 'Step: {} Iter: {}'.format(it, i) l_str += ' j2j : {:0.8f}'.format(joint_error) l_str += ' total : {:0.8f}'.format(tot_loss) for k in loss_dict: l_str += ', {}: {:0.8f}'.format(k, loss_dict[k].mean().item()) loop.set_description(l_str) # create final results, the smoothing can be used to create consistent demo videos # Note that we do not use the smoothing for evaluation in our paper smooth_pose = gaussian_smoothing(smpl_init.pose_body, window_size=3, sigma=2) idx = [0, -1] smooth_pose[idx] = smpl_init.pose_body[idx] smpl_init = self.body_model(betas=self.betas, pose_body=smooth_pose) if vis: self.visualize(smpl_init.v, smpl_init.f, self.out_path, render=True, prefix='out', device=self.device) seq_to_video(os.path.join(self.out_path, 'renders'), os.path.join(self.out_path, 'merges'), video_path=os.path.join(self.out_path, 'motion.mp4')) joint_error = smpl_init.Jtr[:, :22] - smpl_gt.Jtr[:, :22] vert_error = smpl_init.v - smpl_gt.v MPJPE = torch.mean(torch.sqrt(torch.sum(joint_error * joint_error, dim=2)), dim=1) * 100. # remain batch dim MPVPE = torch.mean(torch.sqrt(torch.sum(vert_error * vert_error, dim=2)), dim=1) * 100. if verbose: print('after denoising:{:0.8f} cm'.format(MPJPE.mean())) results_dict = {'init_MPJPE': init_MPJPE.detach().cpu().numpy(), 'MPJPE': MPJPE.detach().cpu().numpy(), 'MPVPE': MPVPE.detach().cpu().numpy()} return results_dict def denoise(config, args, model, gt_file, out_path, std=0.04, verbose=False): motion_data_gt = np.load(gt_file)['pose_body'] batch_size = len(motion_data_gt) gt_poses = torch.from_numpy(motion_data_gt.astype(np.float32)).to(args.device) # [batchsize, 63] # load body model body_model = BodyModel(bm_path=args.bodymodel_path, model_type='smplx', batch_size=batch_size, num_betas=10).to( args.device) # generate noise on joints std = std joints3d = body_model(pose_body=gt_poses).Jtr[:, :22] noisy_joints3d = joints3d + std * torch.randn(*joints3d.shape, device=joints3d.device) if args.time_strategy in ['1']: sde_N = 500 dposer_weight = 1e-1 else: sde_N = 500 dposer_weight = 1.0 # If you try to reduce 'sample_trun' or 'sample_time', reduce weight too for converge. 
# create Motion denoiser layer motion_denoiser = MotionDenoise(config, args, model, sde_N=sde_N, body_model=body_model, dposer_weight=dposer_weight, # For axis setting 1e-1, batch_size=batch_size, out_path=out_path) if std == 0.02: kwargs = {'iterations': 3, 'steps_per_iter': 40, 'sample_trun': 10.0, 'sample_time': 495} elif std == 0.04: kwargs = {'iterations': 3, 'steps_per_iter': 60, 'sample_trun': 4.0, 'sample_time': 490} elif std == 0.1: kwargs = {'iterations': 3, 'steps_per_iter': 80, 'sample_trun': 3.0, 'sample_time': 480} else: raise NotImplementedError() if args.file_path is not None: # visualization for toy data verbose = True kwargs['vis'] = True batch_results = motion_denoiser.optimize(noisy_joints3d, gt_poses, args.time_strategy, verbose=verbose, **kwargs) return batch_results def main(args): def find_npz_files(data_dir): npz_files = [] for root, dirs, files in os.walk(data_dir): for file in files: if file.endswith('.npz'): npz_files.append(os.path.relpath(os.path.join(root, file), data_dir)) return npz_files torch.manual_seed(42) config = FLAGS.config POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 model = ScoreModelFC( config,
FLAGS = flags.FLAGS config_flags.DEFINE_config_file( "config", None, "Visualizing configuration.", lock_config=False) flags.mark_flags_as_required(["config"]) bg_img = np.ones([512, 384, 3]) * 255 # background canvas focal = [1500, 1500] princpt = [200, 192] def parse_args(argv): parser = argparse_flags.ArgumentParser(description='motion denosing (3D noisy joints -> clean poses)') parser.add_argument('--dataset-folder', type=str, default='../data/AMASS/amass_processed', help='the folder includes necessary normalizing parameters') parser.add_argument('--version', type=str, default='version1', help='dataset version') parser.add_argument('--ckpt-path', type=str, default='./pretrained_models/axis-zscore-400k.pth') parser.add_argument('--bodymodel-path', type=str, default='../body_models/smplx/SMPLX_NEUTRAL.npz', help='load SMPLX') parser.add_argument('--outpath-folder', type=str, default='./output/test_results/motion_denoise') parser.add_argument('--noise-std', type=float, default=0.04, help='control added noise scales') parser.add_argument('--time-strategy', type=str, default='3', choices=['1', '2', '3'], help='random, fix, truncated annealing') parser.add_argument('--device', type=str, default='cuda:0') # data preparation parser.add_argument('--file-path', type=str, help='use toy data to run') parser.add_argument('--data-dir', type=str, default='../humor/out/amass_joints_noisy_fitting/results_out', help='the whole AMASS testset, (output from HuMoR)') parser.add_argument('--dataset', type=str, default='AMASS') args = parser.parse_args(argv[1:]) return args class MotionDenoise(object): def __init__(self, config, args, diffusion_model, body_model, sde_N=1000, dposer_weight=1.0, out_path=None, debug=False, batch_size=1): self.args = args self.debug = debug self.device = args.device self.body_model = body_model self.dposer_weight = dposer_weight self.out_path = out_path # only needed for visualization self.batch_size = batch_size self.betas = torch.zeros((batch_size, 10), device=self.device) self.poses = torch.randn((batch_size, 63), device=self.device) * 0.01 self.Normalizer = Posenormalizer( data_path=f'{args.dataset_folder}/{args.version}/train', normalize=config.data.normalize, min_max=config.data.min_max, rot_rep=config.data.rot_rep, device=args.device) if config.training.sde.lower() == 'vpsde': sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales) elif config.training.sde.lower() == 'subvpsde': sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=config.model.num_scales) elif config.training.sde.lower() == 'vesde': sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=config.model.num_scales) else: raise NotImplementedError(f"SDE {config.training.sde} unknown.") sde.N = sde_N # discrete sampling steps self.sde = sde self.score_fn = mutils.get_score_fn(sde, diffusion_model, train=False, continuous=config.training.continuous) self.rsde = sde.reverse(self.score_fn, False) # L2 loss self.loss_fn = nn.MSELoss(reduction='none') def one_step_denoise(self, x_t, t): drift, diffusion, alpha, sigma_2, score = self.rsde.sde(x_t, t, guide=True) x_0_hat = (x_t + sigma_2[:, None] * score) / alpha SNR = alpha / torch.sqrt(sigma_2)[:, None] return x_0_hat.detach(), SNR def multi_step_denoise(self, x_t, t, t_end, N=10): time_traj = linear_interpolation(t, t_end, N + 1) x_current = x_t for i in range(N): t_current = time_traj[i] t_before = time_traj[i + 1] alpha_current, 
sigma_current = self.sde.return_alpha_sigma(t_current) alpha_before, sigma_before = self.sde.return_alpha_sigma(t_before) score = self.score_fn(x_current, t_current, condition=None, mask=None) score = -score * sigma_current[:, None] # score to noise prediction x_current = alpha_before / alpha_current * (x_current - sigma_current[:, None] * score) + sigma_before[ :, None] * score alpha, sigma = self.sde.return_alpha_sigma(time_traj[0]) SNR = alpha / sigma[:, None] return x_current.detach(), SNR # In our experiments, we found multi-step denoise will lead to worse results. def DPoser_loss(self, x_0, vec_t, quan_t, weighted=False, multi_denoise=False): # x_0: [B, j*6], vec_t: [B], quan_t: [1] z = torch.randn_like(x_0) mean, std = self.sde.marginal_prob(x_0, vec_t) perturbed_data = mean + std[:, None] * z # if multi_denoise: denoise_data, SNR = self.multi_step_denoise(perturbed_data, vec_t, t_end=vec_t / (2 * 10), N=10) else: denoise_data, SNR = self.one_step_denoise(perturbed_data, vec_t) if weighted: weight = 0.5 * torch.sqrt(1+SNR) else: weight = 0.5 dposer_loss = torch.sum(weight * self.loss_fn(x_0, denoise_data)) / self.batch_size return dposer_loss def RED_Diff(self, x_0, vec_t, quan_t): z = torch.randn_like(x_0) mean, std = self.sde.marginal_prob(x_0, vec_t) perturbed_data = mean + std[:, None] * z # _, _, alpha, sigma_2, score = self.rsde.sde(perturbed_data, vec_t, guide=True) score = -score * std[:, None] # score to noise prediction inverse_SNR = torch.sqrt(sigma_2) / alpha[:, 0] weight = inverse_SNR guidance = torch.mean(weight * torch.einsum('ij,ij->i', (score - z).detach(), x_0)) return guidance def get_loss_weights(self): """Set loss weights""" loss_weight = {'temp': lambda cst, it: 10. ** 1 * cst * (1 + it), 'data': lambda cst, it: 10. ** 2 * cst / (1 + it * it), 'dposer': lambda cst, it: 10. 
** -1 * cst * (1 + it) * self.dposer_weight } return loss_weight @staticmethod def backward_step(loss_dict, weight_dict, it): w_loss = dict() for k in loss_dict: w_loss[k] = weight_dict[k](loss_dict[k], it) tot_loss = list(w_loss.values()) tot_loss = torch.stack(tot_loss).sum() return tot_loss @staticmethod def visualize(vertices, faces, out_path, render=False, prefix='out', save_mesh=False, faster=False, device=None): # save meshes and rendered results if needed os.makedirs(out_path, exist_ok=True) if save_mesh: vertices = vertices.detach().cpu() faces = faces.cpu() os.makedirs(os.path.join(out_path, 'meshes'), exist_ok=True) [save_obj(vertices[i], faces, os.path.join(out_path, 'meshes', '{}_{:04}.obj'.format(prefix, i))) for i in range(len(vertices))] if render: os.makedirs(os.path.join(out_path, 'renders'), exist_ok=True) if faster: assert device is not None target_path = os.path.join(out_path, 'renders') faster_render(vertices, faces, target_path, prefix + '_{:04}.jpg', device) else: vertices = vertices.detach().cpu() faces = faces.cpu() for i in range(len(vertices)): rendered_img = render_mesh(bg_img, vertices[i], faces, {'focal': focal, 'princpt': princpt}, view='front') cv2.imwrite(os.path.join(out_path, 'renders', '{}_{:04}.png'.format(prefix, i)), rendered_img) def optimize(self, joints3d, gt_poses=None, time_strategy='1', sample_trun=2.0, sample_time=990, iterations=5, steps_per_iter=50, verbose=False, vis=False): # create initial SMPL joints and vertices for visualition(to be used for data term) smpl_init = self.body_model(betas=self.betas, pose_body=self.poses) smpl_gt = self.body_model(betas=self.betas, pose_body=gt_poses) if vis: vis_skeletons(joints3d.detach().cpu().numpy(), os.path.join(self.out_path, 'renders')) print('skeleton figures saved') self.visualize(smpl_gt.v, smpl_gt.f, self.out_path, render=True, prefix='gt', device=self.device) joint_error = joints3d - smpl_gt.Jtr[:, :22] init_MPJPE = torch.mean(torch.sqrt(torch.sum(joint_error * joint_error, dim=2)), dim=1) * 100. 
if verbose: print('before denoising:{:0.8f} cm'.format(init_MPJPE.mean())) init_joints = joints3d.detach() # Optimizer smpl_init.pose_body.requires_grad = True optimizer = torch.optim.Adam([smpl_init.pose_body], 0.03, betas=(0.9, 0.999)) # Get loss_weights weight_dict = self.get_loss_weights() eps = 1e-3 timesteps = torch.linspace(self.sde.T, eps, self.sde.N, device=self.device) total_steps = iterations*steps_per_iter for it in range(iterations): if verbose: loop = tqdm(range(steps_per_iter)) loop.set_description('Optimizing SMPL poses') else: loop = range(steps_per_iter) for i in loop: step = it * steps_per_iter + i optimizer.zero_grad() loss_dict = dict() ''' ************* DPoser loss *********** ''' poses = self.Normalizer.offline_normalize(smpl_init.pose_body, from_axis=True) if time_strategy == '1': # not recommend quan_t = torch.randint(self.sde.N, [1]) elif time_strategy == '2': quan_t = torch.tensor(sample_time) elif time_strategy == '3': quan_t = self.sde.N - math.floor(torch.tensor(total_steps - step - 1) * (self.sde.N / (sample_trun * total_steps))) - 2 else: raise NotImplementedError('unsupported time sampling strategy') t = timesteps[quan_t] vec_t = torch.ones(self.batch_size, device=self.device) * t loss_dict['dposer'] = self.DPoser_loss(poses, vec_t, quan_t) ''' *********** DPoser loss ************ ''' # calculate temporal loss between mesh vertices smpl_init = self.body_model(betas=smpl_init.betas, pose_body=smpl_init.pose_body) temp_term = smpl_init.v[:-1] - smpl_init.v[1:] loss_dict['temp'] = torch.mean(torch.sqrt(torch.sum(temp_term * temp_term, dim=2))) # calculate data term from inital noisy pose data_term = smpl_init.Jtr[:, :22] - init_joints data_term = torch.mean(torch.sqrt(torch.sum(data_term * data_term, dim=2))) if data_term > 0: # for nans loss_dict['data'] = data_term # Get total loss for backward pass tot_loss = self.backward_step(loss_dict, weight_dict, it) tot_loss.backward() optimizer.step() if verbose: # only for check joint_error = smpl_init.Jtr[:, :22] - smpl_gt.Jtr[:, :22] joint_error = torch.mean(torch.sqrt(torch.sum(joint_error * joint_error, dim=2))) * 100. l_str = 'Step: {} Iter: {}'.format(it, i) l_str += ' j2j : {:0.8f}'.format(joint_error) l_str += ' total : {:0.8f}'.format(tot_loss) for k in loss_dict: l_str += ', {}: {:0.8f}'.format(k, loss_dict[k].mean().item()) loop.set_description(l_str) # create final results, the smoothing can be used to create consistent demo videos # Note that we do not use the smoothing for evaluation in our paper smooth_pose = gaussian_smoothing(smpl_init.pose_body, window_size=3, sigma=2) idx = [0, -1] smooth_pose[idx] = smpl_init.pose_body[idx] smpl_init = self.body_model(betas=self.betas, pose_body=smooth_pose) if vis: self.visualize(smpl_init.v, smpl_init.f, self.out_path, render=True, prefix='out', device=self.device) seq_to_video(os.path.join(self.out_path, 'renders'), os.path.join(self.out_path, 'merges'), video_path=os.path.join(self.out_path, 'motion.mp4')) joint_error = smpl_init.Jtr[:, :22] - smpl_gt.Jtr[:, :22] vert_error = smpl_init.v - smpl_gt.v MPJPE = torch.mean(torch.sqrt(torch.sum(joint_error * joint_error, dim=2)), dim=1) * 100. # remain batch dim MPVPE = torch.mean(torch.sqrt(torch.sum(vert_error * vert_error, dim=2)), dim=1) * 100. 
if verbose: print('after denoising:{:0.8f} cm'.format(MPJPE.mean())) results_dict = {'init_MPJPE': init_MPJPE.detach().cpu().numpy(), 'MPJPE': MPJPE.detach().cpu().numpy(), 'MPVPE': MPVPE.detach().cpu().numpy()} return results_dict def denoise(config, args, model, gt_file, out_path, std=0.04, verbose=False): motion_data_gt = np.load(gt_file)['pose_body'] batch_size = len(motion_data_gt) gt_poses = torch.from_numpy(motion_data_gt.astype(np.float32)).to(args.device) # [batchsize, 63] # load body model body_model = BodyModel(bm_path=args.bodymodel_path, model_type='smplx', batch_size=batch_size, num_betas=10).to( args.device) # generate noise on joints std = std joints3d = body_model(pose_body=gt_poses).Jtr[:, :22] noisy_joints3d = joints3d + std * torch.randn(*joints3d.shape, device=joints3d.device) if args.time_strategy in ['1']: sde_N = 500 dposer_weight = 1e-1 else: sde_N = 500 dposer_weight = 1.0 # If you try to reduce 'sample_trun' or 'sample_time', reduce weight too for converge. # create Motion denoiser layer motion_denoiser = MotionDenoise(config, args, model, sde_N=sde_N, body_model=body_model, dposer_weight=dposer_weight, # For axis setting 1e-1, batch_size=batch_size, out_path=out_path) if std == 0.02: kwargs = {'iterations': 3, 'steps_per_iter': 40, 'sample_trun': 10.0, 'sample_time': 495} elif std == 0.04: kwargs = {'iterations': 3, 'steps_per_iter': 60, 'sample_trun': 4.0, 'sample_time': 490} elif std == 0.1: kwargs = {'iterations': 3, 'steps_per_iter': 80, 'sample_trun': 3.0, 'sample_time': 480} else: raise NotImplementedError() if args.file_path is not None: # visualization for toy data verbose = True kwargs['vis'] = True batch_results = motion_denoiser.optimize(noisy_joints3d, gt_poses, args.time_strategy, verbose=verbose, **kwargs) return batch_results def main(args): def find_npz_files(data_dir): npz_files = [] for root, dirs, files in os.walk(data_dir): for file in files: if file.endswith('.npz'): npz_files.append(os.path.relpath(os.path.join(root, file), data_dir)) return npz_files torch.manual_seed(42) config = FLAGS.config POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 model = ScoreModelFC( config,
n_poses=N_POSES,
14
2023-11-29 15:55:50+00:00
16k
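The motion-denoising code in the record above repeatedly scores results with the same pattern, torch.mean(torch.sqrt(torch.sum(err * err, dim=2)), dim=1) * 100., for joint (MPJPE) and vertex (MPVPE) errors in centimeters. For readers who only want that metric in isolation, here is a minimal, self-contained sketch; the helper name mpjpe_cm and the toy tensors are illustrative assumptions, not part of the dataset entry.

import torch

def mpjpe_cm(pred_joints: torch.Tensor, gt_joints: torch.Tensor) -> torch.Tensor:
    # Mean per-joint position error in centimeters for a batch of poses.
    # pred_joints, gt_joints: [B, J, 3]; returns a [B] tensor, using the same
    # sqrt-of-squared-sum pattern as the optimize() code in the record.
    err = pred_joints - gt_joints
    return torch.mean(torch.sqrt(torch.sum(err * err, dim=2)), dim=1) * 100.0

# Toy usage (hypothetical shapes: batch of 4 frames, 22 body joints).
pred = torch.randn(4, 22, 3)
gt = pred + 0.01 * torch.randn_like(pred)
print(mpjpe_cm(pred, gt))  # roughly 1.6 cm on average for this noise scale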
KylinYee/R2-Talker-code
main.py
[ { "identifier": "NeRFDataset", "path": "nerf/provider.py", "snippet": "class NeRFDataset:\n def __init__(self, opt, device, type='train', downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.type = type # train, val, test\n self.downscale = downscale\n self.root_path = opt.path\n self.preload = opt.preload # 0 = disk, 1 = cpu, 2 = gpu\n self.scale = opt.scale # camera radius scale to make sure camera are inside the bounding box.\n self.offset = opt.offset # camera offset\n self.bound = opt.bound # bounding box half length, also used as the radius to random sample poses.\n self.fp16 = opt.fp16\n\n self.start_index = opt.data_range[0]\n self.end_index = opt.data_range[1]\n\n self.training = self.type in ['train', 'all', 'trainval']\n self.num_rays = self.opt.num_rays if self.training else -1\n\n # load nerf-compatible format data.\n \n # load all splits (train/valid/test)\n if type == 'all':\n transform_paths = glob.glob(os.path.join(self.root_path, '*.json'))\n transform = None\n for transform_path in transform_paths:\n with open(transform_path, 'r') as f:\n tmp_transform = json.load(f)\n if transform is None:\n transform = tmp_transform\n else:\n transform['frames'].extend(tmp_transform['frames'])\n # load train and val split\n elif type == 'trainval':\n with open(os.path.join(self.root_path, f'transforms_train.json'), 'r') as f:\n transform = json.load(f)\n with open(os.path.join(self.root_path, f'transforms_val.json'), 'r') as f:\n transform_val = json.load(f)\n transform['frames'].extend(transform_val['frames'])\n # only load one specified split\n else:\n # no test, use val as test\n _split = 'val' if type == 'test' else type\n with open(os.path.join(self.root_path, f'transforms_{_split}.json'), 'r') as f:\n transform = json.load(f)\n\n # load image size\n if 'h' in transform and 'w' in transform:\n self.H = int(transform['h']) // downscale\n self.W = int(transform['w']) // downscale\n else:\n self.H = int(transform['cy']) * 2 // downscale\n self.W = int(transform['cx']) * 2 // downscale\n \n # read images\n frames = transform[\"frames\"]\n\n # use a slice of the dataset\n if self.end_index == -1: # abuse...\n self.end_index = len(frames)\n\n frames = frames[self.start_index:self.end_index]\n\n # use a subset of dataset.\n if type == 'train':\n if self.opt.part:\n frames = frames[::10] # 1/10 frames\n elif self.opt.part2:\n frames = frames[:375] # first 15s\n elif type == 'val':\n frames = frames[:100] # first 100 frames for val\n\n print(f'[INFO] load {len(frames)} {type} frames.')\n\n # only load pre-calculated aud features when not live-streaming\n if not self.opt.asr:\n # empty means the default self-driven extracted features.\n if self.opt.aud == '':\n if self.opt.cond_type == 'eo':\n aud_features = np.load(os.path.join(self.root_path, 'aud_eo.npy'))\n elif self.opt.cond_type == 'ds':\n aud_features = np.load(os.path.join(self.root_path, 'aud_ds.npy'))\n elif self.opt.cond_type == 'idexp':\n aud_features = np.load(os.path.join(self.root_path, 'aud_idexp.npy'))\n else:\n aud_features = np.load(os.path.join(self.root_path, 'aud.npy'))\n # cross-driven extracted features. 
\n else:\n aud_features = np.load(self.opt.aud)\n\n if self.opt.method == 'genefaceDagger':\n video_idexp_lm3d_mean = aud_features.mean(axis=0).reshape([1,68,3])\n video_idexp_lm3d_std = aud_features.std(axis=0).reshape([1,68,3])\n aud_features = (aud_features - video_idexp_lm3d_mean) / video_idexp_lm3d_std\n\n\n aud_features = torch.from_numpy(aud_features)\n\n\n # support both [N, 16] labels and [N, 16, K] logits\n if len(aud_features.shape) == 3:\n # if self.opt.cond_type in ['eo', 'ds']:\n # aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16] \n\n if self.opt.emb:\n print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')\n aud_features = aud_features.argmax(1) # [N, 16]\n \n else:\n assert self.opt.emb, \"aud only provide labels, must use --emb\"\n aud_features = aud_features.long()\n\n\n print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')\n\n self.torso_img = []\n self.images = []\n\n self.poses = []\n self.exps = []\n\n self.auds = []\n self.face_rect = []\n self.lips_rect = []\n self.eye_area = []\n\n for f in tqdm.tqdm(frames, desc=f'Loading {type} data'):\n\n f_path = os.path.join(self.root_path, 'gt_imgs', str(f['img_id']) + '.jpg')\n\n if not os.path.exists(f_path):\n print('[WARN]', f_path, 'NOT FOUND!')\n continue\n \n pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]\n pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)\n self.poses.append(pose)\n\n if self.preload > 0:\n image = cv2.imread(f_path, cv2.IMREAD_UNCHANGED) # [H, W, 3] o [H, W, 4]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.images.append(image)\n else:\n self.images.append(f_path)\n\n # load frame-wise bg\n \n torso_img_path = os.path.join(self.root_path, 'torso_imgs', str(f['img_id']) + '.png')\n\n if self.preload > 0:\n torso_img = cv2.imread(torso_img_path, cv2.IMREAD_UNCHANGED) # [H, W, 4]\n torso_img = cv2.cvtColor(torso_img, cv2.COLOR_BGRA2RGBA)\n torso_img = torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.torso_img.append(torso_img)\n else:\n self.torso_img.append(torso_img_path)\n\n # find the corresponding audio to the image frame\n if not self.opt.asr and self.opt.aud == '':\n aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...\n self.auds.append(aud)\n\n # load lms and extract face\n lms = np.loadtxt(os.path.join(self.root_path, 'ori_imgs', str(f['img_id']) + '.lms')) # [68, 2]\n\n xmin, xmax = int(lms[31:36, 1].min()), int(lms[:, 1].max())\n ymin, ymax = int(lms[:, 0].min()), int(lms[:, 0].max())\n self.face_rect.append([xmin, xmax, ymin, ymax])\n\n if self.opt.exp_eye:\n eyes_left = slice(36, 42)\n eyes_right = slice(42, 48)\n\n area_left = polygon_area(lms[eyes_left, 0], lms[eyes_left, 1])\n area_right = polygon_area(lms[eyes_right, 0], lms[eyes_right, 1])\n\n # area percentage of two eyes of the whole image...\n area = (area_left + area_right) / (self.H * self.W) * 100\n\n self.eye_area.append(area)\n\n if self.opt.finetune_lips:\n lips = slice(48, 60)\n xmin, xmax = int(lms[lips, 1].min()), int(lms[lips, 1].max())\n ymin, ymax = int(lms[lips, 0].min()), int(lms[lips, 0].max())\n\n # padding to H == W\n cx = (xmin + xmax) // 2\n cy = (ymin + ymax) // 2\n\n l = max(xmax - xmin, ymax - ymin) // 2\n xmin = max(0, cx - l)\n xmax = min(self.H, cx + l)\n ymin = max(0, cy - l)\n ymax = min(self.W, cy + l)\n\n self.lips_rect.append([xmin, xmax, ymin, ymax])\n \n # load pre-extracted 
background image (should be the same size as training image...)\n\n if self.opt.bg_img == 'white': # special\n bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)\n elif self.opt.bg_img == 'black': # special\n bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)\n else: # load from file\n # default bg\n if self.opt.bg_img == '':\n self.opt.bg_img = os.path.join(self.root_path, 'bc.jpg')\n bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]\n if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:\n bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)\n bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)\n bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.bg_img = bg_img\n\n self.poses = np.stack(self.poses, axis=0)\n\n # smooth camera path...\n if self.opt.smooth_path:\n self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)\n \n self.poses = torch.from_numpy(self.poses) # [N, 4, 4]\n\n if self.preload > 0:\n self.images = torch.from_numpy(np.stack(self.images, axis=0)) # [N, H, W, C]\n self.torso_img = torch.from_numpy(np.stack(self.torso_img, axis=0)) # [N, H, W, C]\n else:\n self.images = np.array(self.images)\n self.torso_img = np.array(self.torso_img)\n\n if self.opt.asr:\n # live streaming, no pre-calculated auds\n self.auds = None\n else:\n # auds corresponding to images\n if self.opt.aud == '':\n self.auds = torch.stack(self.auds, dim=0) # [N, 32, 16]\n # auds is novel, may have a different length with images\n else:\n self.auds = aud_features\n \n self.bg_img = torch.from_numpy(self.bg_img)\n\n if self.opt.exp_eye:\n self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]\n print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')\n\n if self.opt.smooth_eye:\n\n # naive 5 window average\n ori_eye = self.eye_area.copy()\n for i in range(ori_eye.shape[0]):\n start = max(0, i - 1)\n end = min(ori_eye.shape[0], i + 2)\n self.eye_area[i] = ori_eye[start:end].mean()\n\n self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]\n\n \n # calculate mean radius of all camera poses\n self.radius = self.poses[:, :3, 3].norm(dim=-1).mean(0).item()\n #print(f'[INFO] dataset camera poses: radius = {self.radius:.4f}, bound = {self.bound}')\n\n \n # [debug] uncomment to view all training poses.\n # visualize_poses(self.poses.numpy())\n\n # [debug] uncomment to view examples of randomly generated poses.\n # visualize_poses(rand_poses(100, self.device, radius=self.radius).cpu().numpy())\n\n if self.preload > 1:\n self.poses = self.poses.to(self.device)\n\n if self.auds is not None:\n self.auds = self.auds.to(self.device)\n\n self.bg_img = self.bg_img.to(torch.half).to(self.device)\n\n self.torso_img = self.torso_img.to(torch.half).to(self.device)\n self.images = self.images.to(torch.half).to(self.device)\n \n if self.opt.exp_eye:\n self.eye_area = self.eye_area.to(self.device)\n\n # load intrinsics\n if 'focal_len' in transform:\n fl_x = fl_y = transform['focal_len']\n elif 'fl_x' in transform or 'fl_y' in transform:\n fl_x = (transform['fl_x'] if 'fl_x' in transform else transform['fl_y']) / downscale\n fl_y = (transform['fl_y'] if 'fl_y' in transform else transform['fl_x']) / downscale\n elif 'camera_angle_x' in transform or 'camera_angle_y' in transform:\n # blender, assert in radians. 
already downscaled since we use H/W\n fl_x = self.W / (2 * np.tan(transform['camera_angle_x'] / 2)) if 'camera_angle_x' in transform else None\n fl_y = self.H / (2 * np.tan(transform['camera_angle_y'] / 2)) if 'camera_angle_y' in transform else None\n if fl_x is None: fl_x = fl_y\n if fl_y is None: fl_y = fl_x\n else:\n raise RuntimeError('Failed to load focal length, please check the transforms.json!')\n\n cx = (transform['cx'] / downscale) if 'cx' in transform else (self.W / 2)\n cy = (transform['cy'] / downscale) if 'cy' in transform else (self.H / 2)\n \n self.intrinsics = np.array([fl_x, fl_y, cx, cy])\n\n # directly build the coordinate meshgrid in [-1, 1]^2\n self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]\n\n\n def mirror_index(self, index):\n size = self.poses.shape[0]\n turn = index // size\n res = index % size\n if turn % 2 == 0:\n return res\n else:\n return size - res - 1\n\n\n def collate(self, index):\n\n B = len(index) # a list of length 1\n # assert B == 1\n\n results = {}\n\n # audio use the original index\n if self.auds is not None:\n if self.opt.cond_type == 'idexp':\n auds = get_audio_features(self.auds, self.opt.att, index[0], smooth_win_size=5).to(self.device)\n else:\n auds = get_audio_features(self.auds, self.opt.att, index[0]).to(self.device)\n\n results['auds'] = auds\n\n # head pose and bg image may mirror (replay --> <-- --> <--).\n index[0] = self.mirror_index(index[0])\n\n poses = self.poses[index].to(self.device) # [B, 4, 4]\n \n if self.training and self.opt.finetune_lips:\n rect = self.lips_rect[index[0]]\n results['rect'] = rect\n rays = get_rays(poses, self.intrinsics, self.H, self.W, -1, rect=rect)\n else:\n rays = get_rays(poses, self.intrinsics, self.H, self.W, self.num_rays, self.opt.patch_size)\n\n results['index'] = index # for ind. 
code\n results['H'] = self.H\n results['W'] = self.W\n results['rays_o'] = rays['rays_o']\n results['rays_d'] = rays['rays_d']\n\n # get a mask for rays inside rect_face\n if self.training:\n xmin, xmax, ymin, ymax = self.face_rect[index[0]]\n face_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['face_mask'] = face_mask\n\n if self.opt.exp_eye:\n results['eye'] = self.eye_area[index].to(self.device) # [1]\n else:\n results['eye'] = None\n\n # load bg\n bg_torso_img = self.torso_img[index]\n if self.preload == 0: # on the fly loading\n bg_torso_img = cv2.imread(bg_torso_img[0], cv2.IMREAD_UNCHANGED) # [H, W, 4]\n bg_torso_img = cv2.cvtColor(bg_torso_img, cv2.COLOR_BGRA2RGBA)\n bg_torso_img = bg_torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n bg_torso_img = torch.from_numpy(bg_torso_img).unsqueeze(0)\n bg_torso_img = bg_torso_img[..., :3] * bg_torso_img[..., 3:] + self.bg_img * (1 - bg_torso_img[..., 3:])\n bg_torso_img = bg_torso_img.view(B, -1, 3).to(self.device)\n\n if not self.opt.torso:\n bg_img = bg_torso_img\n else:\n bg_img = self.bg_img.view(1, -1, 3).repeat(B, 1, 1).to(self.device)\n\n if self.training:\n bg_img = torch.gather(bg_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n\n results['bg_color'] = bg_img\n\n if self.opt.torso and self.training:\n bg_torso_img = torch.gather(bg_torso_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n results['bg_torso_color'] = bg_torso_img\n\n images = self.images[index] # [B, H, W, 3/4]\n if self.preload == 0:\n images = cv2.imread(images[0], cv2.IMREAD_UNCHANGED) # [H, W, 3]\n images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)\n images = images.astype(np.float32) / 255 # [H, W, 3]\n images = torch.from_numpy(images).unsqueeze(0)\n images = images.to(self.device)\n\n if self.training:\n C = images.shape[-1]\n images = torch.gather(images.view(B, -1, C), 1, torch.stack(C * [rays['inds']], -1)) # [B, N, 3/4]\n \n results['images'] = images\n\n if self.training:\n bg_coords = torch.gather(self.bg_coords, 1, torch.stack(2 * [rays['inds']], -1)) # [1, N, 2]\n else:\n bg_coords = self.bg_coords # [1, N, 2]\n\n results['bg_coords'] = bg_coords\n\n results['poses'] = convert_poses(poses) # [B, 6]\n results['poses_matrix'] = poses # [B, 4, 4]\n \n return results\n\n def dataloader(self):\n\n if self.training:\n # training len(poses) == len(auds)\n size = self.poses.shape[0]\n else:\n # test with novel auds, then use its length\n if self.auds is not None:\n size = self.auds.shape[0]\n # live stream test, use 2 * len(poses), so it naturally mirrors.\n else:\n size = 2 * self.poses.shape[0]\n\n loader = DataLoader(list(range(size)), batch_size=1, collate_fn=self.collate, shuffle=self.training, num_workers=0)\n loader._data = self # an ugly fix... 
we need poses in trainer.\n\n # do evaluate if has gt images and use self-driven setting\n loader.has_gt = (self.opt.aud == '')\n\n return loader " }, { "identifier": "NeRFGUI", "path": "nerf/gui.py", "snippet": "class NeRFGUI:\n def __init__(self, opt, trainer, data_loader, debug=True):\n self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.\n self.W = opt.W\n self.H = opt.H\n self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)\n self.debug = debug\n self.training = False\n self.step = 0 # training step \n\n self.trainer = trainer\n self.data_loader = data_loader\n\n # override with dataloader's intrinsics\n self.W = data_loader._data.W\n self.H = data_loader._data.H\n self.cam.update_intrinsics(data_loader._data.intrinsics)\n\n # use dataloader's pose\n pose_init = data_loader._data.poses[0]\n self.cam.update_pose(pose_init.detach().cpu().numpy())\n\n # use dataloader's bg\n bg_img = data_loader._data.bg_img #.view(1, -1, 3)\n if self.H != bg_img.shape[0] or self.W != bg_img.shape[1]:\n bg_img = F.interpolate(bg_img.permute(2, 0, 1).unsqueeze(0).contiguous(), (self.H, self.W), mode='bilinear').squeeze(0).permute(1, 2, 0).contiguous()\n self.bg_color = bg_img.view(1, -1, 3)\n\n # audio features (from dataloader, only used in non-playing mode)\n self.audio_features = data_loader._data.auds # [N, 29, 16]\n self.audio_idx = 0\n\n # control eye\n self.eye_area = None if not self.opt.exp_eye else data_loader._data.eye_area.mean().item()\n\n # playing seq from dataloader, or pause.\n self.playing = False\n self.loader = iter(data_loader)\n\n self.render_buffer = np.zeros((self.W, self.H, 3), dtype=np.float32)\n self.need_update = True # camera moved, should reset accumulation\n self.spp = 1 # sample per pixel\n self.mode = 'image' # choose from ['image', 'depth']\n\n self.dynamic_resolution = False # assert False!\n self.downscale = 1\n self.train_steps = 16\n\n self.ind_index = 0\n self.ind_num = trainer.model.individual_codes.shape[0]\n\n # build asr\n if self.opt.asr:\n self.asr = ASR(opt)\n \n dpg.create_context()\n self.register_dpg()\n self.test_step()\n \n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.opt.asr:\n self.asr.stop() \n dpg.destroy_context()\n\n def train_step(self):\n\n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n outputs = self.trainer.train_gui(self.data_loader, step=self.train_steps)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n self.step += self.train_steps\n self.need_update = True\n\n dpg.set_value(\"_log_train_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_train_log\", f'step = {self.step: 5d} (+{self.train_steps: 2d}), loss = {outputs[\"loss\"]:.4f}, lr = {outputs[\"lr\"]:.5f}')\n\n # dynamic train steps\n # max allowed train time per-frame is 500 ms\n full_t = t / self.train_steps * 16\n train_steps = min(16, max(4, int(16 * 500 / full_t)))\n if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8:\n self.train_steps = train_steps\n\n def prepare_buffer(self, outputs):\n if self.mode == 'image':\n return outputs['image']\n else:\n return np.expand_dims(outputs['depth'], -1).repeat(3, -1)\n\n def test_step(self):\n\n if self.need_update or self.spp < self.opt.max_spp:\n \n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n if 
self.playing:\n try:\n data = next(self.loader)\n except StopIteration:\n self.loader = iter(self.data_loader)\n data = next(self.loader)\n \n if self.opt.asr:\n # use the live audio stream\n data['auds'] = self.asr.get_next_feat()\n\n outputs = self.trainer.test_gui_with_data(data, self.W, self.H)\n\n # sync local camera pose\n self.cam.update_pose(data['poses_matrix'][0].detach().cpu().numpy())\n \n else:\n if self.audio_features is not None:\n auds = get_audio_features(self.audio_features, self.opt.att, self.audio_idx)\n else:\n auds = None\n outputs = self.trainer.test_gui(self.cam.pose, self.cam.intrinsics, self.W, self.H, auds, self.eye_area, self.ind_index, self.bg_color, self.spp, self.downscale)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n # update dynamic resolution\n if self.dynamic_resolution:\n # max allowed infer time per-frame is 200 ms\n full_t = t / (self.downscale ** 2)\n downscale = min(1, max(1/4, math.sqrt(200 / full_t)))\n if downscale > self.downscale * 1.2 or downscale < self.downscale * 0.8:\n self.downscale = downscale\n\n if self.need_update:\n self.render_buffer = self.prepare_buffer(outputs)\n self.spp = 1\n self.need_update = False\n else:\n self.render_buffer = (self.render_buffer * self.spp + self.prepare_buffer(outputs)) / (self.spp + 1)\n self.spp += 1\n \n if self.playing:\n self.need_update = True\n\n dpg.set_value(\"_log_infer_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_resolution\", f'{int(self.downscale * self.W)}x{int(self.downscale * self.H)}')\n dpg.set_value(\"_log_spp\", self.spp)\n dpg.set_value(\"_texture\", self.render_buffer)\n\n \n def register_dpg(self):\n\n ### register texture \n\n with dpg.texture_registry(show=False):\n dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag=\"_texture\")\n\n ### register window\n\n # the rendered image, as the primary window\n with dpg.window(tag=\"_primary_window\", width=self.W, height=self.H):\n\n # add the texture\n dpg.add_image(\"_texture\")\n\n # dpg.set_primary_window(\"_primary_window\", True)\n\n dpg.show_tool(dpg.mvTool_Metrics)\n\n # control window\n with dpg.window(label=\"Control\", tag=\"_control_window\", width=400, height=300):\n\n # button theme\n with dpg.theme() as theme_button:\n with dpg.theme_component(dpg.mvButton):\n dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))\n dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)\n\n # time\n if not self.opt.test:\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train time: \")\n dpg.add_text(\"no data\", tag=\"_log_train_time\") \n\n with dpg.group(horizontal=True):\n dpg.add_text(\"Infer time: \")\n dpg.add_text(\"no data\", tag=\"_log_infer_time\")\n \n with dpg.group(horizontal=True):\n dpg.add_text(\"SPP: \")\n dpg.add_text(\"1\", tag=\"_log_spp\")\n\n # train button\n if not self.opt.test:\n with dpg.collapsing_header(label=\"Train\", default_open=True):\n\n # train / stop\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train: \")\n\n def callback_train(sender, app_data):\n if self.training:\n self.training = False\n dpg.configure_item(\"_button_train\", label=\"start\")\n else:\n self.training = True\n dpg.configure_item(\"_button_train\", label=\"stop\")\n\n dpg.add_button(label=\"start\", tag=\"_button_train\", callback=callback_train)\n 
dpg.bind_item_theme(\"_button_train\", theme_button)\n\n def callback_reset(sender, app_data):\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n self.trainer.model.apply(fn=weight_reset)\n self.trainer.model.reset_extra_state() # for cuda_ray density_grid and step_counter\n self.need_update = True\n\n dpg.add_button(label=\"reset\", tag=\"_button_reset\", callback=callback_reset)\n dpg.bind_item_theme(\"_button_reset\", theme_button)\n\n # save ckpt\n with dpg.group(horizontal=True):\n dpg.add_text(\"Checkpoint: \")\n\n def callback_save(sender, app_data):\n self.trainer.save_checkpoint(full=True, best=False)\n dpg.set_value(\"_log_ckpt\", \"saved \" + os.path.basename(self.trainer.stats[\"checkpoints\"][-1]))\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"save\", tag=\"_button_save\", callback=callback_save)\n dpg.bind_item_theme(\"_button_save\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_ckpt\")\n \n # save mesh\n with dpg.group(horizontal=True):\n dpg.add_text(\"Marching Cubes: \")\n\n def callback_mesh(sender, app_data):\n self.trainer.save_mesh(resolution=256, threshold=10)\n dpg.set_value(\"_log_mesh\", \"saved \" + f'{self.trainer.name}_{self.trainer.epoch}.ply')\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"mesh\", tag=\"_button_mesh\", callback=callback_mesh)\n dpg.bind_item_theme(\"_button_mesh\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_mesh\")\n\n with dpg.group(horizontal=True):\n dpg.add_text(\"\", tag=\"_log_train_log\")\n\n \n # rendering options\n with dpg.collapsing_header(label=\"Options\", default_open=True):\n \n # playing\n with dpg.group(horizontal=True):\n dpg.add_text(\"Play: \")\n\n def callback_play(sender, app_data):\n \n if self.playing:\n self.playing = False\n dpg.configure_item(\"_button_play\", label=\"start\")\n else:\n self.playing = True\n dpg.configure_item(\"_button_play\", label=\"stop\")\n if self.opt.asr:\n self.asr.warm_up()\n self.need_update = True\n\n dpg.add_button(label=\"start\", tag=\"_button_play\", callback=callback_play)\n dpg.bind_item_theme(\"_button_play\", theme_button)\n\n # set asr\n if self.opt.asr:\n\n # clear queue button\n def callback_clear_queue(sender, app_data):\n \n self.asr.clear_queue()\n self.need_update = True\n\n dpg.add_button(label=\"clear\", tag=\"_button_clear_queue\", callback=callback_clear_queue)\n dpg.bind_item_theme(\"_button_clear_queue\", theme_button)\n\n # dynamic rendering resolution\n with dpg.group(horizontal=True):\n\n def callback_set_dynamic_resolution(sender, app_data):\n if self.dynamic_resolution:\n self.dynamic_resolution = False\n self.downscale = 1\n else:\n self.dynamic_resolution = True\n self.need_update = True\n\n # Disable dynamic resolution for face.\n # dpg.add_checkbox(label=\"dynamic resolution\", default_value=self.dynamic_resolution, callback=callback_set_dynamic_resolution)\n dpg.add_text(f\"{self.W}x{self.H}\", tag=\"_log_resolution\")\n\n # mode combo\n def callback_change_mode(sender, app_data):\n self.mode = app_data\n self.need_update = True\n \n dpg.add_combo(('image', 'depth'), label='mode', default_value=self.mode, callback=callback_change_mode)\n\n\n # bg_color picker\n def callback_change_bg(sender, app_data):\n self.bg_color = torch.tensor(app_data[:3], dtype=torch.float32) # only need RGB in [0, 1]\n self.need_update = True\n\n 
dpg.add_color_edit((255, 255, 255), label=\"Background Color\", width=200, tag=\"_color_editor\", no_alpha=True, callback=callback_change_bg)\n\n # audio index slider\n if not self.opt.asr:\n def callback_set_audio_index(sender, app_data):\n self.audio_idx = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Audio\", min_value=0, max_value=self.audio_features.shape[0] - 1, format=\"%d\", default_value=self.audio_idx, callback=callback_set_audio_index)\n\n # ind code index slider\n if self.opt.ind_dim > 0:\n def callback_set_individual_code(sender, app_data):\n self.ind_index = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Individual\", min_value=0, max_value=self.ind_num - 1, format=\"%d\", default_value=self.ind_index, callback=callback_set_individual_code)\n\n # eye area slider\n if self.opt.exp_eye:\n def callback_set_eye(sender, app_data):\n self.eye_area = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"eye area\", min_value=0, max_value=0.5, format=\"%.2f percent\", default_value=self.eye_area, callback=callback_set_eye)\n\n # fov slider\n def callback_set_fovy(sender, app_data):\n self.cam.fovy = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"FoV (vertical)\", min_value=1, max_value=120, format=\"%d deg\", default_value=self.cam.fovy, callback=callback_set_fovy)\n\n # dt_gamma slider\n def callback_set_dt_gamma(sender, app_data):\n self.opt.dt_gamma = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"dt_gamma\", min_value=0, max_value=0.1, format=\"%.5f\", default_value=self.opt.dt_gamma, callback=callback_set_dt_gamma)\n\n # max_steps slider\n def callback_set_max_steps(sender, app_data):\n self.opt.max_steps = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"max steps\", min_value=1, max_value=1024, format=\"%d\", default_value=self.opt.max_steps, callback=callback_set_max_steps)\n\n # aabb slider\n def callback_set_aabb(sender, app_data, user_data):\n # user_data is the dimension for aabb (xmin, ymin, zmin, xmax, ymax, zmax)\n self.trainer.model.aabb_infer[user_data] = app_data\n\n # also change train aabb ? 
[better not...]\n #self.trainer.model.aabb_train[user_data] = app_data\n\n self.need_update = True\n\n dpg.add_separator()\n dpg.add_text(\"Axis-aligned bounding box:\")\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"x\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=0)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=3)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"y\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=1)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=4)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"z\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=2)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=5)\n \n\n # debug info\n if self.debug:\n with dpg.collapsing_header(label=\"Debug\"):\n # pose\n dpg.add_separator()\n dpg.add_text(\"Camera Pose:\")\n dpg.add_text(str(self.cam.pose), tag=\"_log_pose\")\n\n\n ### register camera handler\n\n def callback_camera_drag_rotate(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.orbit(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_wheel_scale(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n delta = app_data\n\n self.cam.scale(delta)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_drag_pan(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.pan(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n with dpg.handler_registry():\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=callback_camera_drag_rotate)\n dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan)\n\n \n dpg.create_viewport(title='RAD-NeRF', width=1080, height=720, resizable=True)\n\n ### global theme\n with dpg.theme() as theme_no_padding:\n with dpg.theme_component(dpg.mvAll):\n # set all padding to 0 to avoid scroll bar\n dpg.add_theme_style(dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core)\n \n dpg.bind_item_theme(\"_primary_window\", theme_no_padding)\n\n dpg.setup_dearpygui()\n\n #dpg.show_metrics()\n\n dpg.show_viewport()\n\n\n def render(self):\n\n while dpg.is_dearpygui_running():\n # update texture every frame\n if self.training:\n self.train_step()\n # audio stream thread...\n if self.opt.asr and self.playing:\n # run 2 ASR steps (audio is at 50FPS, video is 
at 25FPS)\n for _ in range(2):\n self.asr.run_step()\n self.test_step()\n dpg.render_dearpygui_frame()" } ]
import torch
import argparse
from nerf.provider import NeRFDataset
from nerf.gui import NeRFGUI
from nerf.utils import *
from nerf.network import NeRFNetwork, R2TalkerNeRF, GeneNeRFNetwork
12,896
parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--cond_type', type=str, default=None, help="type of driving condition: eo, ds, idexp") parser.add_argument('--method', type=str, default='r2talker', help="r2talker, genefaceDagger, rad-nerf") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." 
if opt.finetune_lips: # do not update density grid in finetune stage opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train:
# torch.autograd.set_detect_anomaly(True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('path', type=str) parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)") parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='workspace') parser.add_argument('--seed', type=int, default=0) ### training options parser.add_argument('--iters', type=int, default=200000, help="training iters") parser.add_argument('--lr', type=float, default=5e-3, help="initial learning rate") parser.add_argument('--lr_net', type=float, default=5e-4, help="initial learning rate") parser.add_argument('--ckpt', type=str, default='latest') parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--lambda_amb', type=float, default=0.1, help="lambda for ambient loss") parser.add_argument('--bg_img', type=str, default='', help="background image") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. 
set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--cond_type', type=str, default=None, help="type of driving condition: eo, ds, idexp") parser.add_argument('--method', type=str, default='r2talker', help="r2talker, genefaceDagger, rad-nerf") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', 
action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." if opt.finetune_lips: # do not update density grid in finetune stage opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train:
test_set = NeRFDataset(opt, device=device, type='train')
0
2023-12-04 12:51:59+00:00
16k
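The entry point in the row above expands the `-O` shorthand into several options after parsing and derives `cond_type` from the chosen method. A minimal sketch of that post-parse option-wiring pattern, using an illustrative argv rather than real command-line input (the method-to-condition mapping itself is taken from the code above):

import argparse

# Sketch: a shorthand flag is parsed first, then expanded into concrete options,
# and the driving-condition type follows the selected method.
parser = argparse.ArgumentParser()
parser.add_argument('-O', action='store_true', help="shorthand: enable fp16 and exp_eye")
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--exp_eye', action='store_true')
parser.add_argument('--method', type=str, default='r2talker')

opt = parser.parse_args(['-O', '--method', 'rad-nerf'])  # illustrative argv only

if opt.O:                      # expand the shorthand after parsing
    opt.fp16 = True
    opt.exp_eye = True

cond_type_by_method = {'r2talker': 'idexp', 'genefaceDagger': 'idexp', 'rad-nerf': 'eo'}
opt.cond_type = cond_type_by_method[opt.method]
print(opt)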
ubc-vision/vivid123
vivid123/generation_utils.py
[ { "identifier": "CLIPCameraProjection", "path": "vivid123/models/clip_camera_projection.py", "snippet": "class CLIPCameraProjection(ModelMixin, ConfigMixin):\n \"\"\"\n A Projection layer for CLIP embedding and camera embedding.\n Parameters:\n embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `clip_embed`\n additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the\n projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +\n additional_embeddings`.\n \"\"\"\n\n @register_to_config\n def __init__(self, embedding_dim: int = 768, additional_embeddings: int = 4):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.additional_embeddings = additional_embeddings\n\n self.input_dim = self.embedding_dim + self.additional_embeddings\n self.output_dim = self.embedding_dim\n\n self.proj = torch.nn.Linear(self.input_dim, self.output_dim)\n\n def forward(\n self,\n embedding: torch.FloatTensor,\n ):\n \"\"\"\n The [`PriorTransformer`] forward method.\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, input_dim)`):\n The currently input embeddings.\n Returns:\n The output embedding projection (`torch.FloatTensor` of shape `(batch_size, output_dim)`).\n \"\"\"\n proj_embedding = self.proj(embedding)\n return proj_embedding" }, { "identifier": "ViVid123Pipeline", "path": "vivid123/pipelines/vivid123_pipeline.py", "snippet": "class ViVid123Pipeline(TextToVideoSDPipeline):\n r\"\"\"\n Pipeline for text-to-video generation.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).\n tokenizer (`CLIPTokenizer`):\n A [`~transformers.CLIPTokenizer`] to tokenize text.\n unet ([`UNet3DConditionModel`]):\n A [`UNet3DConditionModel`] to denoise the encoded video latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n novel_view_unet: UNet2DConditionModel,\n image_encoder: CLIPVisionModelWithProjection,\n cc_projection: CLIPCameraProjection,\n ):\n super().__init__(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)\n\n self.register_modules(\n novel_view_unet=novel_view_unet,\n image_encoder=image_encoder,\n cc_projection=cc_projection,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n\n self.image_processor = VaeImageProcessor(\n vae_scale_factor=self.vae_scale_factor,\n do_convert_rgb=True,\n do_normalize=True,\n )\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs\n def check_inputs(\n self,\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n num_inference_steps=50,\n fusion_schedule=None,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. 
Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n \n if fusion_schedule is None:\n raise ValueError(\n \"Fusion schedule is not provided.\"\n )\n \n if len(fusion_schedule[0]) != num_inference_steps or len(fusion_schedule[1]) != num_inference_steps:\n raise ValueError(\n \"Fusion schedule length does not match the number of timesteps.\"\n )\n \n def prepare_latents(\n self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None, noise_identical_accross_frames=False\n ):\n shape = (\n batch_size,\n num_channels_latents,\n num_frames if not noise_identical_accross_frames else 1,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n )\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n else:\n if latents.shape != shape:\n raise ValueError(\n f\"User-prepared `latents` must have shape {shape}, when noise_identical_accross_frames={noise_identical_accross_frames} but got {latents.shape}.\"\n )\n latents = latents.to(device)\n\n if noise_identical_accross_frames:\n latents = latents.repeat(1, 1, num_frames, 1, 1)\n \n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def prepare_img_latents(\n self, image, batch_size, dtype, device, generator=None, do_zero123_classifier_free_guidance=False\n ):\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n if isinstance(image, torch.Tensor):\n # Batch single image\n if image.ndim == 3:\n assert image.shape[0] == 3, \"Image outside a batch should be of shape (3, H, W)\"\n image = image.unsqueeze(0)\n\n assert image.ndim == 4, \"Image must have 4 dimensions\"\n\n # Check image is in [-1, 1]\n if image.min() < -1 or image.max() > 1:\n raise ValueError(\"Image should be in [-1, 1] range\")\n else:\n # preprocess image\n if isinstance(image, (PIL.Image.Image, np.ndarray)):\n image = [image]\n\n if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):\n image = [np.array(i.convert(\"RGB\"))[None, :] for i in image]\n image = np.concatenate(image, axis=0)\n elif isinstance(image, list) and isinstance(image[0], np.ndarray):\n image = np.concatenate([i[None, :] for i in image], axis=0)\n\n image = image.transpose(0, 3, 1, 2)\n image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0\n\n image = image.to(device=device, dtype=dtype)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n if isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i]) for i in range(batch_size) # sample\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.mode()\n\n # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor\n if batch_size > init_latents.shape[0]:\n # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)\n num_images_per_prompt = batch_size // init_latents.shape[0]\n # duplicate image latents for each generation per prompt, using mps friendly method\n bs_embed, emb_c, emb_h, emb_w = init_latents.shape\n init_latents = init_latents.unsqueeze(1)\n init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)\n init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)\n\n # init_latents = torch.cat([init_latents]*2) if do_zero123_classifier_free_guidance else init_latents # follow zero123\n init_latents = (\n torch.cat([torch.zeros_like(init_latents), init_latents])\n if do_zero123_classifier_free_guidance\n else init_latents\n )\n\n init_latents = init_latents.to(device=device, dtype=dtype)\n return init_latents\n\n def CLIP_preprocess(self, x):\n dtype = x.dtype\n # following openai's implementation\n # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741\n # follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608\n if isinstance(x, torch.Tensor):\n if x.min() < -1.0 or x.max() > 1.0:\n raise ValueError(\"Expected input tensor to have values in the range [-1, 1]\")\n x = kornia.geometry.resize(\n x.to(torch.float32), (224, 224), interpolation=\"bicubic\", align_corners=True, antialias=False\n ).to(dtype=dtype)\n x = (x + 1.0) / 2.0\n # renormalize according to clip\n x = kornia.enhance.normalize(\n x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])\n )\n return x\n\n # from stable_diffusion_image_variation\n def _encode_image(self, image, device, num_images_per_prompt, do_video_classifier_free_guidance):\n dtype = next(self.image_encoder.parameters()).dtype\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n if isinstance(image, torch.Tensor):\n # Batch single image\n if image.ndim == 3:\n assert image.shape[0] == 3, \"Image outside a batch should be of shape (3, H, W)\"\n image = image.unsqueeze(0)\n\n assert image.ndim == 4, \"Image must have 4 dimensions\"\n\n # Check image is in [-1, 1]\n if image.min() < -1 or image.max() > 1:\n raise ValueError(\"Image should be in [-1, 1] range\")\n else:\n # preprocess image\n if isinstance(image, (PIL.Image.Image, np.ndarray)):\n image = [image]\n\n if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):\n image = [np.array(i.convert(\"RGB\"))[None, :] for i in image]\n image = np.concatenate(image, axis=0)\n elif isinstance(image, list) and isinstance(image[0], np.ndarray):\n image = np.concatenate([i[None, :] for i in image], axis=0)\n\n image = image.transpose(0, 3, 1, 2)\n image = 
torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0\n\n image = image.to(device=device, dtype=dtype)\n\n image = self.CLIP_preprocess(image)\n # if not isinstance(image, torch.Tensor):\n # # 0-255\n # print(\"Warning: image is processed by hf's preprocess, which is different from openai original's.\")\n # image = self.feature_extractor(images=image, return_tensors=\"pt\").pixel_values\n image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)\n image_embeddings = image_embeddings.unsqueeze(1)\n\n # duplicate image embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = image_embeddings.shape\n image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)\n image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n if do_video_classifier_free_guidance:\n negative_prompt_embeds = torch.zeros_like(image_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])\n\n return image_embeddings\n\n def _encode_pose(self, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):\n dtype = next(self.cc_projection.parameters()).dtype\n if isinstance(pose, torch.Tensor):\n pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)\n else:\n if isinstance(pose[0], list):\n pose = torch.Tensor(pose)\n else:\n pose = torch.Tensor([pose])\n x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)\n pose_embeddings = (\n torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)\n .unsqueeze(1)\n .to(device=device, dtype=dtype)\n ) # B, 1, 4\n # duplicate pose embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = pose_embeddings.shape\n pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)\n pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n if do_video_classifier_free_guidance:\n negative_prompt_embeds = torch.zeros_like(pose_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])\n return pose_embeddings\n\n def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):\n img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)\n pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)\n prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)\n prompt_embeds = self.cc_projection(prompt_embeds)\n # prompt_embeds = img_prompt_embeds\n # follow 0123, add negative prompt, after projection\n if do_video_classifier_free_guidance:\n negative_prompt = torch.zeros_like(prompt_embeds)\n prompt_embeds = torch.cat([negative_prompt, prompt_embeds])\n return prompt_embeds\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_frames: int = 16,\n num_inference_steps: int = 50,\n guidance_scale_video: float = 9.0,\n negative_prompt: 
Optional[Union[str, List[str]]] = None,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"np\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n # vivid123 params below\n image: Optional[\n Union[\n torch.FloatTensor,\n PIL.Image.Image,\n np.ndarray,\n List[torch.FloatTensor],\n List[PIL.Image.Image],\n List[np.ndarray],\n ]\n ] = None,\n cam_pose_torch: Optional[torch.FloatTensor] = None,\n fusion_schedule: Optional[tuple[float]] = None,\n ddim_eta_0123: float = 1.0,\n guidance_scale_zero123: float = 3.0,\n noise_identical_accross_frames: bool = False,\n ):\n r\"\"\"\n The call function to the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.\n height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The height in pixels of the generated video.\n width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The width in pixels of the generated video.\n num_frames (`int`, *optional*, defaults to 16):\n The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds\n amounts to 2 seconds of video.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality videos at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide what to not include in image generation. If not defined, you need to\n pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies\n to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`. Latents should be of shape\n `(batch_size, num_channel, num_frames, height, width)`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not\n provided, text embeddings are generated from the `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If\n not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.\n output_type (`str`, *optional*, defaults to `\"np\"`):\n The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead\n of a plain tuple.\n callback (`Callable`, *optional*):\n A function that calls every `callback_steps` steps during inference. The function is called with the\n following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function is called. If not specified, the callback is called at\n every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in\n [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n guidance_rescale (`float`, *optional*, defaults to 0.0):\n Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are\n Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when\n using zero terminal SNR.\n guidance_scale_zero123 (`float`, *optional*, defaults to 3.0):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n cam_pose_torch: (`torch.FloatTensor`, *optional*):\n Camera pose in torch tensor, shape (4,). The elements mean (el, sin(az), cos(az), radius)\n fusion_schedule (`tuple[float]`, *optional*):\n Fusion schedule for video diffusion and zero123. The first element is the schedule for video diffusion, and the\n second element is the schedule for zero123. The length of each schedule should be the same as the number\n of timesteps.\n ddim_eta_0123 (`float`, *optional*, defaults to 1.0):\n The eta value for the 0123 diffusion steps. Only applies to the [`~schedulers.DDIMScheduler`], and is\n ignored in other schedulers.\n \n Example:\n \n\n Returns:\n [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:\n If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is\n returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_videos_per_image_prompt = 1\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n num_inference_steps,\n fusion_schedule\n )\n\n # 2. 
Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_video_classifier_free_guidance = guidance_scale_video > 1.0\n do_zero123_classifier_free_guidance = guidance_scale_zero123 > 1.0\n\n # 3.1 Encode input prompt for video diffusion\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt=prompt,\n device=device,\n # by diffusers v0.23.1, the naming of diffusers.pipelines.TextToVideoSDPipeline is still \"num_images_per_prompt\",\n # where it should be \"num_videos_per_prompt\"\n num_images_per_prompt=num_videos_per_image_prompt,\n do_classifier_free_guidance=do_video_classifier_free_guidance,\n negative_prompt=negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_video_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 3.2 Encode input image for zero123\n zero123_cond_images = [image for _ in range(num_frames)]\n zero123_embeds = self._encode_image_with_pose(\n zero123_cond_images,\n cam_pose_torch,\n device,\n num_videos_per_image_prompt,\n do_zero123_classifier_free_guidance,\n ) # (2xF) x 1 x 768\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_image_prompt,\n num_channels_latents,\n num_frames,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n noise_identical_accross_frames,\n )\n\n # 6. Prepare Zero123 image latents\n img_latents = self.prepare_img_latents(\n zero123_cond_images,\n batch_size=num_frames,\n dtype=zero123_embeds.dtype,\n device=device,\n generator=generator,\n do_zero123_classifier_free_guidance=True,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_video_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual with video diffusion\n noise_pred_video = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform classifier-free guidance for video diffusion\n if do_video_classifier_free_guidance:\n noise_pred_video_uncond, noise_pred_video_text = noise_pred_video.chunk(2)\n noise_pred_video = noise_pred_video_uncond + guidance_scale_video * (\n noise_pred_video_text - noise_pred_video_uncond\n )\n # if do_video_classifier_free_guidance and guidance_rescale > 0.0:\n # # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n # noise_pred_video = rescale_noise_cfg(\n # noise_pred_video, noise_pred_video_text, guidance_rescale=guidance_rescale\n # )\n\n # zero123 denoising\n latent_model_input_zero123 = torch.cat([latents] * 2) if do_zero123_classifier_free_guidance else latents\n augmented_latent_model_input_zero123 = torch.cat(\n [rearrange(latent_model_input_zero123, \"B C F H W -> (B F) C H W\"), img_latents],\n dim=1,\n ).to(self.novel_view_unet.dtype)\n noise_pred_zero123 = self.novel_view_unet(\n augmented_latent_model_input_zero123,\n t,\n encoder_hidden_states=zero123_embeds,\n return_dict=True,\n ).sample\n noise_pred_zero123 = rearrange(noise_pred_zero123, \"(B F) C H W -> B C F H W\", F=num_frames)\n\n if do_zero123_classifier_free_guidance:\n noise_pred_zero123_uncond, noise_pred_zero123_text = noise_pred_zero123.chunk(2)\n noise_pred_zero123 = noise_pred_zero123_uncond + guidance_scale_zero123 * (\n noise_pred_zero123_text - noise_pred_zero123_uncond\n )\n\n # fusing video diffusion with zero123\n noise_pred = fusion_schedule[0][i] * noise_pred_video + fusion_schedule[1][i] * noise_pred_zero123\n\n # reshape latents\n bsz, channel, frames, width, height = latents.shape\n latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # reshape latents back\n latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if output_type == \"latent\":\n return TextToVideoSDPipelineOutput(frames=latents)\n\n video_tensor = self.decode_latents(latents)\n\n if output_type == \"pt\":\n video = video_tensor\n else:\n video = tensor2vid(video_tensor)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (video,)\n\n return TextToVideoSDPipelineOutput(frames=video)" }, { "identifier": "ViVid123BaseSchema", "path": "vivid123/configs/base_schema.py", "snippet": 
"class ViVid123BaseSchema(BaseModel):\n # Disable aliasing underscore to hyphen\n class Config:\n alias_generator = lambda string: string\n\n num_frames: int = 25\n delta_elevation_start: float = 0.0\n delta_elevation_end: float = 0.0\n delta_azimuth_start: float = -45.0\n delta_azimuth_end: float = 45.0\n delta_radius_start: float = 0.0\n delta_radius_end: float = 0.0\n height: int = 256\n width: int = 256\n # num_videos_per_image_prompt: int = 1 # Only support 1 for running on < 24G memory GPU\n num_inference_steps: int = 50\n guidance_scale_zero123: float = 3.0\n guidance_scale_video: float = 1.0\n eta: float = 1.0\n noise_identical_accross_frames: bool = False\n prompt: str = \"\"\n\n video_linear_start_weight: float = 1.0\n video_linear_end_weight: float = 0.5\n video_start_step_percentage: float = 0.0\n video_end_step_percentage: float = 1.0\n zero123_linear_start_weight: float = 1.0\n zero123_linear_end_weight: float = 1.0\n zero123_start_step_percentage: float = 0.0\n zero123_end_step_percentage: float = 1.0\n\n refiner_strength: float = 0.3\n refiner_guidance_scale: float = 12.0\n\n name: str = \"new_balance_used\"\n input_image_path: str = \"tmp/new_balance_used/012.png\"" } ]
import os import yaml import re import torch import numpy as np import imageio.v3 as imageio from typing import List, Any from yaml.parser import ParserError from PIL import Image from diffusers.pipelines import DiffusionPipeline from diffusers.models import UNet2DConditionModel, AutoencoderKL from diffusers.schedulers import DPMSolverMultistepScheduler, EulerDiscreteScheduler from transformers import CLIPVisionModelWithProjection from .models import CLIPCameraProjection from .pipelines import ViVid123Pipeline from .configs import ViVid123BaseSchema
10,899
video_end_step_percentage: float = 1.0, zero123_linear_start_weight: float = 1.0, zero123_linear_end_weight: float = 1.0, zero123_start_step_percentage: float = 0.0, zero123_end_step_percentage: float = 1.0, ): """ Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps Args: video_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the video diffusion at the start of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_linear_end_weight (`float`, *optional*, defaults to 0.5): The weight of the video diffusion at the end of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the video diffusion starts. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. video_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the video diffusion ends. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. zero123_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_linear_end_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the zero123 diffusion starts. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. zero123_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. Return: A tuple of two tensors, video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape `[num_inference_steps]`. zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape `[num_inference_steps]`. 
""" assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet") zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection") zero123_img_enc = CLIPVisionModelWithProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="image_encoder")
def prepare_cam_pose_input( num_frames: int = 25, delta_elevation_start: float = 0.0, delta_elevation_end: float = 0.0, delta_azimuth_start: float = -45.0, delta_azimuth_end: float = 45.0, delta_radius_start: float = 0.0, delta_radius_end: float = 0.0, ): r""" The function to prepare the input to the vivid123 pipeline Args: delta_elevation_start (`float`, *optional*, defaults to 0.0): The starting relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_elevation_end (`float`, *optional*, defaults to 0.0): The ending relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_start (`float`, *optional*, defaults to -45.0): The starting relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_end (`float`, *optional*, defaults to 45.0): The ending relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. Returns: """ cam_elevation = np.radians(np.linspace(delta_elevation_start, delta_elevation_end, num_frames))[..., None] cam_azimuth = np.radians(np.linspace(delta_azimuth_start, delta_azimuth_end, num_frames)) cam_azimuth_sin_cos = np.stack([np.sin(cam_azimuth), np.cos(cam_azimuth)], axis=-1) cam_radius = np.linspace(delta_radius_start, delta_radius_end, num_frames)[..., None] cam_pose_np = np.concatenate([cam_elevation, cam_azimuth_sin_cos, cam_radius], axis=-1) cam_pose_torch = torch.from_numpy(cam_pose_np) return cam_pose_torch # refer to https://stackoverflow.com/a/33507138/6257375 def conver_rgba_to_rgb_white_bg( image: Image, H: int = 256, W: int = 256, ): input_image = image.convert("RGBA").resize((H, W), Image.BICUBIC) background = Image.new("RGBA", input_image.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, input_image) return alpha_composite def prepare_fusion_schedule_linear( num_inference_steps: int = 50, video_linear_start_weight: float = 1.0, video_linear_end_weight: float = 0.5, video_start_step_percentage: float = 0.0, video_end_step_percentage: float = 1.0, zero123_linear_start_weight: float = 1.0, zero123_linear_end_weight: float = 1.0, zero123_start_step_percentage: float = 0.0, zero123_end_step_percentage: float = 1.0, ): """ Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps Args: video_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the video diffusion at the start of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_linear_end_weight (`float`, *optional*, defaults to 0.5): The weight of the video diffusion at the end of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the video diffusion starts. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. video_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the video diffusion ends. 
The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. zero123_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_linear_end_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the zero123 diffusion starts. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. zero123_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. Return: A tuple of two tensors, video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape `[num_inference_steps]`. zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape `[num_inference_steps]`. """ assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = 
torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet") zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection") zero123_img_enc = CLIPVisionModelWithProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="image_encoder")
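As a usage-style sketch of prepare_cam_pose_input above: with the default trajectory the relative azimuth sweeps from -45° to 45° over 25 frames, and each frame's pose is packed as [elevation, sin(az), cos(az), radius], giving a (25, 4) tensor that matches the pipeline's cam_pose_torch argument. The values below are the documented defaults, not new parameters:

import numpy as np
import torch

# Per-frame camera pose trajectory: linear interpolation of elevation/azimuth/radius.
num_frames = 25
el = np.radians(np.linspace(0.0, 0.0, num_frames))[..., None]       # (25, 1)
az = np.radians(np.linspace(-45.0, 45.0, num_frames))               # (25,)
r = np.linspace(0.0, 0.0, num_frames)[..., None]                    # (25, 1)
cam_pose = np.concatenate([el, np.stack([np.sin(az), np.cos(az)], axis=-1), r], axis=-1)
cam_pose_torch = torch.from_numpy(cam_pose)                          # shape (25, 4)
print(cam_pose_torch.shape)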
vivid123_pipe = ViVid123Pipeline.from_pretrained(
1
2023-11-27 22:48:17+00:00
16k
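One detail of the denoising loop in this row's pipeline snippet worth isolating is the per-frame reshape: the scheduler steps on (B*F, C, H, W) tensors, so the 5-D video latents are flattened over frames before scheduler.step and restored afterwards. A small round-trip sketch with toy shapes (the dimensions are illustrative only):

import torch

# Flatten frames into the batch dimension, then invert the reshape exactly.
bsz, channel, frames, h, w = 1, 4, 25, 32, 32
latents = torch.randn(bsz, channel, frames, h, w)
flat = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, h, w)
restored = flat[None, :].reshape(bsz, frames, channel, h, w).permute(0, 2, 1, 3, 4)
assert torch.equal(latents, restored)   # the round trip is lossless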
TISUnion/PrimeBackup
prime_backup/mcdr/task/backup/restore_backup_task.py
[ { "identifier": "CreateBackupAction", "path": "prime_backup/action/create_backup_action.py", "snippet": "class CreateBackupAction(CreateBackupActionBase):\n\tdef __init__(self, creator: Operator, comment: str, *, tags: Optional[BackupTags] = None, expire_timestamp_ns: Optional[int] = None):\n\t\tsuper().__init__()\n\t\tif tags is None:\n\t\t\ttags = BackupTags()\n\n\t\tself.creator = creator\n\t\tself.comment = comment\n\t\tself.tags = tags\n\t\tself.expire_timestamp_ns = expire_timestamp_ns\n\n\t\tself.__pre_calc_result = _PreCalculationResult()\n\t\tself.__blob_store_st: Optional[os.stat_result] = None\n\t\tself.__blob_store_in_cow_fs: Optional[bool] = None\n\n\t\tself.__batch_query_manager: Optional[BatchQueryManager] = None\n\t\tself.__blob_by_size_cache: Dict[int, bool] = {}\n\t\tself.__blob_by_hash_cache: Dict[str, schema.Blob] = {}\n\n\tdef __scan_files(self) -> _ScanResult:\n\t\tcollected = []\n\n\t\tsource_path = self.config.source_path\n\t\tscanned_targets: Dict[str, bool] = {} # use as an ordered set\n\t\tscan_queue: Deque[Path] = collections.deque() # a queue of paths related to the source_path\n\t\tfor scan_target in self.config.backup.targets:\n\t\t\tscan_queue.append(Path(scan_target))\n\n\t\twhile len(scan_queue) > 0:\n\t\t\tscan_target = scan_queue.popleft()\n\t\t\tif (target_posix := scan_target.as_posix()) in scanned_targets:\n\t\t\t\tcontinue\n\t\t\tscanned_targets[target_posix] = True\n\n\t\t\ttarget_path = source_path / scan_target\n\t\t\tif not target_path.exists():\n\t\t\t\tself.logger.info('Skipping not-exist backup target {}'.format(target_path))\n\t\t\t\tcontinue\n\t\t\tif not path_utils.is_relative_to(target_path, source_path):\n\t\t\t\tself.logger.warning(\"Skipping backup target {} cuz it's not inside the source path {}\".format(target_path, source_path))\n\t\t\t\tcontinue\n\n\t\t\tcollected.append(target_path)\n\n\t\t\tif target_path.is_symlink() and self.config.backup.follow_target_symlink:\n\t\t\t\tscan_queue.append(target_path.readlink())\n\t\t\t\tcontinue\n\n\t\t\t# as-is policy, don't scan into symlink\n\t\t\tif not target_path.is_symlink() and target_path.is_dir():\n\t\t\t\tfor dir_path, dir_names, file_names in os.walk(target_path):\n\t\t\t\t\tfor name in file_names + dir_names:\n\t\t\t\t\t\tfile_path = Path(dir_path) / name\n\t\t\t\t\t\tif not self.config.backup.is_file_ignore(file_path):\n\t\t\t\t\t\t\tcollected.append(file_path)\n\n\t\treturn _ScanResult(all_file_paths=collected, root_targets=list(scanned_targets.keys()))\n\n\tdef __pre_calculate_hash(self, session: DbSession, scan_result: _ScanResult):\n\t\tstats = self.__pre_calc_result.stats\n\t\thashes = self.__pre_calc_result.hashes\n\t\tstats.clear()\n\t\thashes.clear()\n\n\t\tsizes = set()\n\t\tfor path in scan_result.all_file_paths:\n\t\t\tst = path.lstat()\n\t\t\tstats[path] = st\n\t\t\tif stat.S_ISREG(st.st_mode):\n\t\t\t\tsizes.add(st.st_size)\n\n\t\thash_dict_lock = threading.Lock()\n\t\texistence = session.has_blob_with_size_batched(list(sizes))\n\t\tself.__blob_by_size_cache.update(existence)\n\n\t\tdef hash_worker(pth: Path):\n\t\t\th = hash_utils.calc_file_hash(pth)\n\t\t\twith hash_dict_lock:\n\t\t\t\thashes[pth] = h\n\n\t\twith FailFastThreadPool(name='hasher') as pool:\n\t\t\tfor path in scan_result.all_file_paths:\n\t\t\t\tst = stats[path]\n\t\t\t\tif stat.S_ISREG(st.st_mode):\n\t\t\t\t\tif existence[st.st_size]:\n\t\t\t\t\t\t# we need to hash the file, sooner or later\n\t\t\t\t\t\tpool.submit(hash_worker, path)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass # will use hash_once 
policy\n\n\[email protected]_property\n\tdef __temp_path(self) -> Path:\n\t\tp = self.config.temp_path\n\t\tp.mkdir(parents=True, exist_ok=True)\n\t\treturn p\n\n\tdef __get_or_create_blob(self, session: DbSession, src_path: Path, st: os.stat_result) -> Generator[Any, Any, Tuple[schema.Blob, os.stat_result]]:\n\t\tsrc_path_str = repr(src_path.as_posix())\n\t\tsrc_path_md5 = hashlib.md5(src_path_str.encode('utf8')).hexdigest()\n\n\t\[email protected]\n\t\tdef make_temp_file() -> ContextManager[Path]:\n\t\t\ttemp_file_name = f'blob_{os.getpid()}_{threading.current_thread().ident}_{src_path_md5}.tmp'\n\t\t\ttemp_file_path = self.__temp_path / temp_file_name\n\t\t\twith contextlib.ExitStack() as exit_stack:\n\t\t\t\texit_stack.callback(functools.partial(self._remove_file, temp_file_path))\n\t\t\t\tyield temp_file_path\n\n\t\tdef attempt_once(last_chance: bool = False) -> Generator[Any, Any, schema.Blob]:\n\t\t\tcompress_method: CompressMethod = self.config.backup.get_compress_method_from_size(st.st_size)\n\t\t\tcan_copy_on_write = (\n\t\t\t\t\tfile_utils.HAS_COPY_FILE_RANGE and\n\t\t\t\t\tcompress_method == CompressMethod.plain and\n\t\t\t\t\tself.__blob_store_in_cow_fs and\n\t\t\t\t\tst.st_dev == self.__blob_store_st.st_dev\n\t\t\t)\n\n\t\t\tpolicy: Optional[_BlobCreatePolicy] = None\n\t\t\tblob_hash: Optional[str] = None\n\t\t\tblob_content: Optional[bytes] = None\n\t\t\traw_size: Optional[int] = None\n\t\t\tstored_size: Optional[int] = None\n\t\t\tpre_calc_hash = self.__pre_calc_result.hashes.pop(src_path, None)\n\n\t\t\tif last_chance:\n\t\t\t\tpolicy = _BlobCreatePolicy.copy_hash\n\t\t\telif pre_calc_hash is not None: # hash already calculated? just use default\n\t\t\t\tpolicy = _BlobCreatePolicy.default\n\t\t\t\tblob_hash = pre_calc_hash\n\t\t\telif not can_copy_on_write: # do tricks iff. 
no COW copy\n\t\t\t\tif st.st_size <= _READ_ALL_SIZE_THRESHOLD:\n\t\t\t\t\tpolicy = _BlobCreatePolicy.read_all\n\t\t\t\t\twith open(src_path, 'rb') as f:\n\t\t\t\t\t\tblob_content = f.read(_READ_ALL_SIZE_THRESHOLD + 1)\n\t\t\t\t\tif len(blob_content) > _READ_ALL_SIZE_THRESHOLD:\n\t\t\t\t\t\tself.logger.warning('Read too many bytes for read_all policy, stat: {}, read: {}'.format(st.st_size, len(blob_content)))\n\t\t\t\t\t\traise _BlobFileChanged()\n\t\t\t\t\tblob_hash = hash_utils.calc_bytes_hash(blob_content)\n\t\t\t\telif st.st_size > _HASH_ONCE_SIZE_THRESHOLD:\n\t\t\t\t\tif (exist := self.__blob_by_size_cache.get(st.st_size)) is None:\n\t\t\t\t\t\t# existence is unknown yet\n\t\t\t\t\t\tyield BlobBySizeFetcher.Req(st.st_size)\n\t\t\t\t\t\tcan_hash_once = self.__blob_by_size_cache[st.st_size] is False\n\t\t\t\t\telse:\n\t\t\t\t\t\tcan_hash_once = exist is False\n\t\t\t\t\tif can_hash_once:\n\t\t\t\t\t\t# it's certain that this blob is unique, but notes: the following code\n\t\t\t\t\t\t# cannot be interrupted (yield), or other generator could make a same blob\n\t\t\t\t\t\tpolicy = _BlobCreatePolicy.hash_once\n\t\t\tif policy is None:\n\t\t\t\tpolicy = _BlobCreatePolicy.default\n\t\t\t\tblob_hash = hash_utils.calc_file_hash(src_path)\n\n\t\t\t# self.logger.info(\"%s %s %s\", policy.name, compress_method.name, src_path)\n\t\t\tif blob_hash is not None:\n\t\t\t\tmisc_utils.assert_true(policy != _BlobCreatePolicy.hash_once, 'unexpected policy')\n\n\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\treturn cache\n\t\t\t\tyield BlobByHashFetcher.Req(blob_hash)\n\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\treturn cache\n\n\t\t\t# notes: the following code cannot be interrupted (yield).\n\t\t\t# The blob is specifically generated by the generator\n\t\t\t# if any yield is done, ensure to check __blob_by_hash_cache again\n\n\t\t\tdef check_changes(new_size: int, new_hash: Optional[str]):\n\t\t\t\tif new_size != st.st_size:\n\t\t\t\t\tself.logger.warning('Blob size mismatch, previous: {}, current: {}'.format(st.st_size, new_size))\n\t\t\t\t\traise _BlobFileChanged()\n\t\t\t\tif blob_hash is not None and new_hash is not None and new_hash != blob_hash:\n\t\t\t\t\tself.logger.warning('Blob hash mismatch, previous: {}, current: {}'.format(blob_hash, new_hash))\n\t\t\t\t\traise _BlobFileChanged()\n\n\t\t\tdef bp_rba(h: str) -> Path:\n\t\t\t\tbp = blob_utils.get_blob_path(h)\n\t\t\t\tself._add_remove_file_rollbacker(bp)\n\t\t\t\treturn bp\n\n\t\t\tcompressor = Compressor.create(compress_method)\n\t\t\tif policy == _BlobCreatePolicy.copy_hash:\n\t\t\t\t# copy to temp file, calc hash, then compress to blob store\n\t\t\t\tmisc_utils.assert_true(blob_hash is None, 'blob_hash should not be calculated')\n\t\t\t\twith make_temp_file() as temp_file_path:\n\t\t\t\t\tfile_utils.copy_file_fast(src_path, temp_file_path)\n\t\t\t\t\tblob_hash = hash_utils.calc_file_hash(temp_file_path)\n\n\t\t\t\t\tmisc_utils.assert_true(last_chance, 'only last_chance=True can use do hash_once without checking uniqueness')\n\t\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\t\treturn cache\n\t\t\t\t\tyield BlobByHashFetcher.Req(blob_hash)\n\t\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\t\treturn cache\n\n\t\t\t\t\tblob_path = bp_rba(blob_hash)\n\t\t\t\t\tcr = compressor.copy_compressed(temp_file_path, blob_path, calc_hash=False)\n\t\t\t\t\traw_size, stored_size = cr.read_size, 
cr.write_size\n\n\t\t\telif policy == _BlobCreatePolicy.hash_once:\n\t\t\t\t# read once, compress+hash to temp file, then move\n\t\t\t\tmisc_utils.assert_true(blob_hash is None, 'blob_hash should not be calculated')\n\t\t\t\twith make_temp_file() as temp_file_path:\n\t\t\t\t\tcr = compressor.copy_compressed(src_path, temp_file_path, calc_hash=True)\n\t\t\t\t\tcheck_changes(cr.read_size, None) # the size must be unchanged, to satisfy the uniqueness\n\n\t\t\t\t\traw_size, blob_hash, stored_size = cr.read_size, cr.read_hash, cr.write_size\n\t\t\t\t\tblob_path = bp_rba(blob_hash)\n\n\t\t\t\t\t# reference: shutil.move, but os.replace is used\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.replace(temp_file_path, blob_path)\n\t\t\t\t\texcept OSError:\n\t\t\t\t\t\t# The temp dir is in the different file system to the blob store?\n\t\t\t\t\t\t# Whatever, use file copy as the fallback\n\t\t\t\t\t\tfile_utils.copy_file_fast(temp_file_path, blob_path)\n\n\t\t\telse:\n\t\t\t\tmisc_utils.assert_true(blob_hash is not None, 'blob_hash is None')\n\t\t\t\tblob_path = bp_rba(blob_hash)\n\n\t\t\t\tif policy == _BlobCreatePolicy.read_all:\n\t\t\t\t\t# the file content is already in memory, just write+compress to blob store\n\t\t\t\t\tmisc_utils.assert_true(blob_content is not None, 'blob_content is None')\n\t\t\t\t\twith compressor.open_compressed_bypassed(blob_path) as (writer, f):\n\t\t\t\t\t\tf.write(blob_content)\n\t\t\t\t\traw_size, stored_size = len(blob_content), writer.get_write_len()\n\t\t\t\telif policy == _BlobCreatePolicy.default:\n\t\t\t\t\tif can_copy_on_write and compress_method == CompressMethod.plain:\n\t\t\t\t\t\t# fast copy, then calc size and hash to verify\n\t\t\t\t\t\tfile_utils.copy_file_fast(src_path, blob_path)\n\t\t\t\t\t\tstored_size, h2 = hash_utils.calc_file_size_and_hash(blob_path)\n\t\t\t\t\t\traw_size = stored_size\n\t\t\t\t\t\tcheck_changes(stored_size, h2)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# copy+compress+hash to blob store\n\t\t\t\t\t\tcr = compressor.copy_compressed(src_path, blob_path, calc_hash=True)\n\t\t\t\t\t\traw_size, stored_size = cr.read_size, cr.write_size\n\t\t\t\t\t\tcheck_changes(cr.read_size, cr.read_hash)\n\t\t\t\telse:\n\t\t\t\t\traise AssertionError()\n\n\t\t\tmisc_utils.assert_true(blob_hash is not None, 'blob_hash is None')\n\t\t\tmisc_utils.assert_true(raw_size is not None, 'raw_size is None')\n\t\t\tmisc_utils.assert_true(stored_size is not None, 'stored_size is None')\n\t\t\treturn self._create_blob(\n\t\t\t\tsession,\n\t\t\t\thash=blob_hash,\n\t\t\t\tcompress=compress_method.name,\n\t\t\t\traw_size=raw_size,\n\t\t\t\tstored_size=stored_size,\n\t\t\t)\n\n\t\tfor i in range(_BLOB_FILE_CHANGED_RETRY_COUNT):\n\t\t\tlast_attempt = i == _BLOB_FILE_CHANGED_RETRY_COUNT - 1\n\t\t\tif i > 0:\n\t\t\t\tself.logger.warning('Try to create blob {} (attempt {} / {})'.format(src_path_str, i + 1, _BLOB_FILE_CHANGED_RETRY_COUNT))\n\t\t\tgen = attempt_once(last_chance=last_attempt)\n\t\t\ttry:\n\t\t\t\tquery = gen.send(None)\n\t\t\t\twhile True:\n\t\t\t\t\tresult = yield query\n\t\t\t\t\tquery = gen.send(result)\n\t\t\texcept StopIteration as e: # ok\n\t\t\t\tblob: schema.Blob = e.value\n\t\t\t\tself.__blob_by_size_cache[blob.raw_size] = True\n\t\t\t\tself.__blob_by_hash_cache[blob.hash] = blob\n\t\t\t\treturn blob, st\n\t\t\texcept _BlobFileChanged:\n\t\t\t\tself.logger.warning('Blob {} stat has changed, {}'.format(src_path_str, 'no more retry' if last_attempt else 'retrying'))\n\t\t\t\tst = src_path.lstat()\n\n\t\tself.logger.error('All blob copy attempts failed, is the file {} keeps 
changing?'.format(src_path_str))\n\t\traise VolatileBlobFile('blob file {} keeps changing'.format(src_path_str))\n\n\tdef __create_file(self, session: DbSession, path: Path) -> Generator[Any, Any, schema.File]:\n\t\trelated_path = path.relative_to(self.config.source_path)\n\n\t\tif (st := self.__pre_calc_result.stats.pop(path, None)) is None:\n\t\t\tst = path.lstat()\n\n\t\tblob: Optional[schema.Blob] = None\n\t\tcontent: Optional[bytes] = None\n\t\tif stat.S_ISREG(st.st_mode):\n\t\t\tgen = self.__get_or_create_blob(session, path, st)\n\t\t\ttry:\n\t\t\t\tquery = gen.send(None)\n\t\t\t\twhile True:\n\t\t\t\t\tresult = yield query\n\t\t\t\t\tquery = gen.send(result)\n\t\t\texcept StopIteration as e:\n\t\t\t\tblob, st = e.value\n\t\t\t\t# notes: st.st_size might be incorrect, use blob.raw_size instead\n\t\telif stat.S_ISDIR(st.st_mode):\n\t\t\tpass\n\t\telif stat.S_ISLNK(st.st_mode):\n\t\t\tcontent = path.readlink().as_posix().encode('utf8')\n\t\telse:\n\t\t\traise UnsupportedFileFormat(st.st_mode)\n\n\t\treturn session.create_file(\n\t\t\tpath=related_path.as_posix(),\n\t\t\tcontent=content,\n\n\t\t\tmode=st.st_mode,\n\t\t\tuid=st.st_uid,\n\t\t\tgid=st.st_gid,\n\t\t\tctime_ns=st.st_ctime_ns,\n\t\t\tmtime_ns=st.st_mtime_ns,\n\t\t\tatime_ns=st.st_atime_ns,\n\n\t\t\tadd_to_session=False,\n\t\t\tblob=blob,\n\t\t)\n\n\tdef run(self) -> BackupInfo:\n\t\tsuper().run()\n\t\tself.__blob_by_size_cache.clear()\n\t\tself.__blob_by_hash_cache.clear()\n\n\t\ttry:\n\t\t\twith DbAccess.open_session() as session:\n\t\t\t\tself.__batch_query_manager = BatchQueryManager(session, self.__blob_by_size_cache, self.__blob_by_hash_cache)\n\n\t\t\t\tscan_result = self.__scan_files()\n\t\t\t\tbackup = session.create_backup(\n\t\t\t\t\tcreator=str(self.creator),\n\t\t\t\t\tcomment=self.comment,\n\t\t\t\t\ttargets=scan_result.root_targets,\n\t\t\t\t\ttags=self.tags.to_dict(),\n\t\t\t\t)\n\t\t\t\tself.logger.info('Creating backup {} on {}'.format(backup, scan_result.root_targets))\n\n\t\t\t\tif self.config.get_effective_concurrency() > 1:\n\t\t\t\t\tself.__pre_calculate_hash(session, scan_result)\n\t\t\t\t\tself.logger.info('Pre-calculate all file hash done')\n\n\t\t\t\tblob_utils.prepare_blob_directories()\n\t\t\t\tbs_path = blob_utils.get_blob_store()\n\t\t\t\tself.__blob_store_st = bs_path.stat()\n\t\t\t\tself.__blob_store_in_cow_fs = file_utils.does_fs_support_cow(bs_path)\n\n\t\t\t\tfiles = []\n\t\t\t\tschedule_queue: Deque[Tuple[Generator, Any]] = collections.deque()\n\t\t\t\tfor file_path in scan_result.all_file_paths:\n\t\t\t\t\tschedule_queue.append((self.__create_file(session, file_path), None))\n\t\t\t\twhile len(schedule_queue) > 0:\n\t\t\t\t\tgen, value = schedule_queue.popleft()\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdef callback(v, g=gen):\n\t\t\t\t\t\t\tschedule_queue.appendleft((g, v))\n\n\t\t\t\t\t\tquery = gen.send(value)\n\t\t\t\t\t\tself.__batch_query_manager.query(query, callback)\n\t\t\t\t\texcept StopIteration as e:\n\t\t\t\t\t\tfiles.append(misc_utils.ensure_type(e.value, schema.File))\n\n\t\t\t\t\tself.__batch_query_manager.flush_if_needed()\n\t\t\t\t\tif len(schedule_queue) == 0:\n\t\t\t\t\t\tself.__batch_query_manager.flush()\n\n\t\t\t\tself._finalize_backup_and_files(session, backup, files)\n\t\t\t\tinfo = BackupInfo.of(backup)\n\n\t\t\ts = self.get_new_blobs_summary()\n\t\t\tself.logger.info('Create backup #{} done, +{} blobs (size {} / {})'.format(\n\t\t\t\tinfo.id, s.count, ByteCount(s.stored_size).auto_str(), ByteCount(s.raw_size).auto_str(),\n\t\t\t))\n\t\t\treturn info\n\n\t\texcept Exception as 
e:\n\t\t\tself._apply_blob_rollback()\n\t\t\traise e" }, { "identifier": "ExportBackupToDirectoryAction", "path": "prime_backup/action/export_backup_action.py", "snippet": "class ExportBackupToDirectoryAction(_ExportBackupActionBase):\n\tclass _ExportItem(NamedTuple):\n\t\tfile: schema.File\n\t\tpath: Path # path to export, related to self.output_path\n\t\tpath_posix: str\n\n\tdef __init__(\n\t\t\tself, backup_id: int, output_path: Path, *,\n\t\t\trestore_mode: bool = False,\n\t\t\tchild_to_export: Optional[Path] = None,\n\t\t\trecursively_export_child: bool = False,\n\t\t\t**kwargs,\n\t):\n\t\t\"\"\"\n\t\t:param restore_mode: recover what it was like -- delete all backup targets before export\n\t\t\"\"\"\n\t\tsuper().__init__(backup_id, output_path, **kwargs)\n\t\tself.restore_mode = restore_mode\n\t\tself.child_to_export = child_to_export\n\t\tself.recursively_export_child = recursively_export_child\n\n\t\tif self.restore_mode and self.child_to_export is not None:\n\t\t\traise ValueError('restore mode does not support exporting child')\n\n\t@classmethod\n\tdef __set_attrs(cls, file: schema.File, file_path: Path):\n\t\t# reference: tarfile.TarFile.extractall, tarfile.TarFile._extract_member\n\n\t\tis_link = stat.S_ISLNK(file.mode)\n\n\t\tif _i_am_root() and file.uid is not None and file.gid is not None:\n\t\t\tu, g = int(file.uid), int(file.gid)\n\t\t\tif is_link and hasattr(os, 'lchown'):\n\t\t\t\tos.lchown(file_path, u, g)\n\t\t\telse:\n\t\t\t\tos.chown(file_path, u, g)\n\n\t\tif not is_link:\n\t\t\tos.chmod(file_path, file.mode)\n\n\t\tif file.atime_ns is not None and file.mtime_ns is not None:\n\t\t\ttimes = (file.atime_ns / 1e9, file.mtime_ns / 1e9)\n\t\t\tif is_link:\n\t\t\t\tif os.utime in os.supports_follow_symlinks:\n\t\t\t\t\tos.utime(file_path, times, follow_symlinks=False)\n\t\t\telse:\n\t\t\t\tos.utime(file_path, times)\n\n\tdef __prepare_for_export(self, item: _ExportItem, trash_bin: _TrashBin):\n\t\tfile_path = self.output_path / item.path\n\t\tif os.path.lexists(file_path):\n\t\t\ttrash_bin.add(file_path, item.path)\n\t\tfile_path.parent.mkdir(parents=True, exist_ok=True)\n\n\tdef __export_file(self, item: _ExportItem, exported_directories: 'queue.Queue[Tuple[schema.File, Path]]'):\n\t\tfile = item.file\n\t\tfile_path = self.output_path / item.path\n\n\t\tif stat.S_ISREG(file.mode):\n\t\t\tself.logger.debug('write file {}'.format(file.path))\n\t\t\tblob_path = blob_utils.get_blob_path(file.blob_hash)\n\t\t\tcompressor = Compressor.create(file.blob_compress)\n\t\t\tif compressor.get_method() == CompressMethod.plain:\n\t\t\t\tfile_utils.copy_file_fast(blob_path, file_path)\n\t\t\t\tif self.verify_blob:\n\t\t\t\t\tsah = hash_utils.calc_file_size_and_hash(file_path)\n\t\t\t\t\tself._verify_exported_blob(file, sah.size, sah.hash)\n\t\t\telse:\n\t\t\t\twith compressor.open_decompressed(blob_path) as f_in:\n\t\t\t\t\twith open(file_path, 'wb') as f_out:\n\t\t\t\t\t\tif self.verify_blob:\n\t\t\t\t\t\t\treader = BypassReader(f_in, calc_hash=True)\n\t\t\t\t\t\t\tshutil.copyfileobj(reader, f_out)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treader = None\n\t\t\t\t\t\t\tshutil.copyfileobj(f_in, f_out)\n\t\t\t\tif reader is not None:\n\t\t\t\t\tself._verify_exported_blob(file, reader.get_read_len(), reader.get_hash())\n\n\t\telif stat.S_ISDIR(file.mode):\n\t\t\tself.logger.debug('write dir {}'.format(file.path))\n\t\t\tfile_path.mkdir(parents=True, exist_ok=True)\n\t\t\texported_directories.put((file, file_path))\n\n\t\telif stat.S_ISLNK(file.mode):\n\t\t\tlink_target = 
file.content.decode('utf8')\n\t\t\tos.symlink(link_target, file_path)\n\t\t\tself.logger.debug('write symbolic link {} -> {}'.format(file_path, link_target))\n\t\telse:\n\t\t\tself._on_unsupported_file_mode(file)\n\n\t\tif not stat.S_ISDIR(file.mode):\n\t\t\tself.__set_attrs(file, file_path)\n\n\tdef _export_backup(self, session: DbSession, backup: schema.Backup) -> ExportFailures:\n\t\tfailures = ExportFailures(self.fail_soft)\n\n\t\t# 1. collect export item\n\n\t\tdef add_export_item(file_: schema.File, export_path: Path):\n\t\t\tfor t in backup.targets:\n\t\t\t\tif path_utils.is_relative_to(Path(file_.path), t):\n\t\t\t\t\texport_items.append(self._ExportItem(file_, export_path, export_path.as_posix()))\n\t\t\t\t\treturn\n\t\t\tself.logger.warning('Found out-of-backup-target file, ignored. file.path: {!r}, backup.targets: {}'.format(file, backup.targets))\n\n\t\texport_items: List[ExportBackupToDirectoryAction._ExportItem] = []\n\t\tif self.child_to_export is None:\n\t\t\tself.logger.info('Exporting {} to directory {}'.format(backup, self.output_path))\n\t\t\tfor file in backup.files:\n\t\t\t\tadd_export_item(file, Path(file.path))\n\t\telse:\n\t\t\tself.logger.info('Exporting child {!r} in {} to directory {}, recursively = {}'.format(self.child_to_export.as_posix(), backup, self.output_path, self.recursively_export_child))\n\t\t\tfor file in backup.files:\n\t\t\t\ttry:\n\t\t\t\t\trel_path = Path(file.path).relative_to(self.child_to_export)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tcontinue\n\t\t\t\tif rel_path != Path('.') and not self.recursively_export_child:\n\t\t\t\t\tcontinue\n\t\t\t\tadd_export_item(file, Path(self.child_to_export.name) / rel_path)\n\n\t\t# 2. do the export\n\n\t\tself.output_path.mkdir(parents=True, exist_ok=True)\n\t\tself.config.temp_path.mkdir(parents=True, exist_ok=True)\n\t\ttrash_bin_name_base = f'.{constants.PLUGIN_ID}.export_trashes'\n\t\ttrash_bin_dir_name = f'{trash_bin_name_base}_{os.getpid()}_{threading.current_thread().ident}'\n\t\ttrash_bin_path = self.config.temp_path / trash_bin_dir_name\n\t\tif self.config.temp_path.stat().st_dev != self.output_path.stat().st_dev:\n\t\t\ttrash_bin_path = self.output_path / trash_bin_dir_name\n\t\ttry:\n\t\t\t# remove existing undeleted trash bins\n\t\t\tfor f in trash_bin_path.parent.iterdir():\n\t\t\t\tif f.name.startswith(trash_bin_name_base):\n\t\t\t\t\tself.logger.warning('Removing existing undeleted trash bin {}'.format(f))\n\t\t\t\t\tfile_utils.rm_rf(f)\n\t\texcept OSError as e:\n\t\t\tself.logger.warning('Error when removing existing undeleted trash bins: {}'.format(e))\n\n\t\ttrash_bin = _TrashBin(trash_bin_path)\n\t\ttry:\n\t\t\tif self.restore_mode:\n\t\t\t\t# in restore mode, recover what it was like\n\t\t\t\t# if the backup does not have the target, don't keep the target\n\t\t\t\tfor target in backup.targets:\n\t\t\t\t\ttarget_path = self.output_path / target\n\t\t\t\t\tif os.path.lexists(target_path):\n\t\t\t\t\t\ttrash_bin.add(target_path, Path(target))\n\n\t\t\t# parent dir first, so the parent will be added to trash-bin first\n\t\t\texport_items.sort(key=lambda ei: ei.path_posix)\n\t\t\tfor item in export_items:\n\t\t\t\twith failures.handling_exception(item.file):\n\t\t\t\t\tself.__prepare_for_export(item, trash_bin)\n\n\t\t\tdirectories: 'queue.Queue[Tuple[schema.File, Path]]' = queue.Queue()\n\t\t\twith FailFastThreadPool('export') as pool:\n\t\t\t\tdef export_worker(item_: ExportBackupToDirectoryAction._ExportItem):\n\t\t\t\t\twith 
failures.handling_exception(item_.file):\n\t\t\t\t\t\tself.__export_file(item_, directories)\n\n\t\t\t\tfor item in export_items:\n\t\t\t\t\tpool.submit(export_worker, item)\n\n\t\t\t# child dir first\n\t\t\t# reference: tarfile.TarFile.extractall\n\t\t\tfor dir_file, dir_file_path in sorted(\n\t\t\t\t\tcollection_utils.drain_queue(directories),\n\t\t\t\t\tkey=lambda d: d[0].path,\n\t\t\t\t\treverse=True,\n\t\t\t):\n\t\t\t\twith failures.handling_exception(dir_file):\n\t\t\t\t\tself.__set_attrs(dir_file, dir_file_path)\n\n\t\texcept Exception:\n\t\t\tself.logger.warning('Error occurs during export to directory, applying rollback')\n\t\t\ttrash_bin.restore()\n\t\t\traise\n\t\tfinally:\n\t\t\ttrash_bin.erase()\n\n\t\treturn failures" }, { "identifier": "GetBackupAction", "path": "prime_backup/action/get_backup_action.py", "snippet": "class GetBackupAction(Action[BackupInfo]):\n\tdef __init__(self, backup_id: int, *, with_files: bool = False):\n\t\tsuper().__init__()\n\t\tself.backup_id = misc_utils.ensure_type(backup_id, int)\n\t\tself.with_files = with_files\n\n\tdef run(self) -> BackupInfo:\n\t\twith DbAccess.open_session() as session:\n\t\t\tbackup = session.get_backup(self.backup_id)\n\t\t\treturn BackupInfo.of(backup, with_files=self.with_files)" }, { "identifier": "ListBackupAction", "path": "prime_backup/action/list_backup_action.py", "snippet": "class ListBackupAction(_ListBackupActionBase[List[BackupInfo]]):\n\tdef run(self) -> List[BackupInfo]:\n\t\twith DbAccess.open_session() as session:\n\t\t\tbackups = session.list_backup(backup_filter=self.backup_filter, limit=self.limit, offset=self.offset)\n\t\t\treturn [BackupInfo.of(backup) for backup in backups]" }, { "identifier": "HeavyTask", "path": "prime_backup/mcdr/task/basic_task.py", "snippet": "class HeavyTask(_BasicTask[_T], ABC):\n\t\"\"\"\n\tFor tasks that require DB access and does some operations on blobs / database\n\t\"\"\"\n\tMAX_ONGOING_TASK = 1" }, { "identifier": "TextComponents", "path": "prime_backup/mcdr/text_components.py", "snippet": "class TextComponents:\n\t@classmethod\n\tdef tr(cls, key, *args, **kwargs):\n\t\tfrom prime_backup.utils.mcdr_utils import tr\n\t\treturn tr('text_components.' 
+ key, *args, **kwargs)\n\n\t@classmethod\n\tdef auto(cls, value: Any) -> RTextBase:\n\t\tif isinstance(value, bool):\n\t\t\treturn cls.boolean(value)\n\t\telif isinstance(value, (int, float)):\n\t\t\treturn cls.number(value)\n\t\telif isinstance(value, Duration):\n\t\t\treturn cls.duration(value)\n\t\telif isinstance(value, Operator):\n\t\t\treturn cls.operator(value)\n\t\telif isinstance(value, ByteCount):\n\t\t\treturn cls.file_size(value)\n\t\telif isinstance(value, Path):\n\t\t\treturn cls.file_name(value)\n\t\telif isinstance(value, datetime.datetime):\n\t\t\treturn cls.date(value)\n\t\telse:\n\t\t\treturn RTextBase.from_any(value)\n\n\t@classmethod\n\tdef backup_brief(cls, backup: BackupInfo, *, backup_id_fancy: bool = True) -> RTextBase:\n\t\t# \"backup #1: foobar\"\n\t\treturn RTextList(cls.tr(\n\t\t\t'backup_brief',\n\t\t\tcls.backup_id(backup.id, hover=backup_id_fancy, click=backup_id_fancy),\n\t\t\tcls.backup_comment(backup.comment),\n\t\t))\n\n\t@classmethod\n\tdef backup_comment(cls, comment: str) -> RTextBase:\n\t\tif len(comment) > 0:\n\t\t\tif (er := backup_utils.extract_backup_comment_translation_key(comment)) is not None:\n\t\t\t\targs = er.args\n\t\t\t\tif er.key == 'pre_restore' and len(args) == 0:\n\t\t\t\t\targs = ('?',)\n\t\t\t\treturn cls.tr(f'backup_comment.{er.key}', *args)\n\t\t\treturn RText(comment)\n\t\telse:\n\t\t\treturn cls.tr('backup_comment.none').set_color(RColor.gray).set_styles(RStyle.italic)\n\n\t@classmethod\n\tdef backup_date(cls, backup: BackupInfo):\n\t\treturn cls.date(backup.date)\n\n\t@classmethod\n\tdef backup_full(cls, backup: BackupInfo, operation_buttons: bool = False, *, show_flags: bool = False) -> RTextBase:\n\t\t# \"[#1] [>] [x] H-- 1.2GiB 2023-11-30 09:30:13: foobar\"\n\t\tt_bid = cls.backup_id(backup.id)\n\n\t\trtl = RTextList(RText('[', RColor.gray), t_bid, RText('] ', RColor.gray))\n\t\tif operation_buttons:\n\t\t\trtl.append(RText('[>]', color=RColor.dark_green).h(cls.tr('backup_full.restore', t_bid)).c(RAction.suggest_command, mkcmd(f'back {backup.id}')), ' ')\n\t\t\tif not backup.tags.is_protected():\n\t\t\t\trtl.append(RText('[x]', color=RColor.red).h(cls.tr('backup_full.delete', t_bid)).c(RAction.suggest_command, mkcmd(f'delete {backup.id}')), ' ')\n\t\t\telse:\n\t\t\t\trtl.append(RText('[x]', color=RColor.dark_gray).h(cls.tr('backup_full.protected', t_bid)), ' ')\n\n\t\tif show_flags:\n\t\t\tfor name in [BackupTagName.hidden, BackupTagName.pre_restore_backup, BackupTagName.protected]:\n\t\t\t\tmisc_utils.assert_true(name.value.type is bool, 'it should be a bool field')\n\t\t\t\tflag = backup.tags.get(name) is True\n\t\t\t\tif flag:\n\t\t\t\t\trtl.append(name.value.flag)\n\t\t\t\telse:\n\t\t\t\t\trtl.append(RText('-', RColor.dark_gray))\n\t\t\trtl.append(' ')\n\n\t\trtl.append(\n\t\t\tcls.backup_size(backup), ' ',\n\t\t\tcls.backup_date(backup), RText(': ', RColor.gray),\n\t\t\tcls.backup_comment(backup.comment).h(cls.tr('backup_full.creator', cls.operator(backup.creator))),\n\t\t)\n\t\treturn rtl\n\n\t@classmethod\n\tdef backup_id(cls, backup_id: Union[int, BackupInfo], *, hover: bool = True, click: bool = True) -> RTextBase:\n\t\tif isinstance(backup_id, BackupInfo):\n\t\t\tbackup_id = backup_id.id\n\t\ttext = RText(f'#{backup_id}', TextColors.backup_id)\n\t\tif hover:\n\t\t\ttext.h(cls.tr('backup_id.hover', RText(backup_id, TextColors.backup_id)))\n\t\tif click:\n\t\t\ttext.c(RAction.run_command, mkcmd(f'show {backup_id}'))\n\t\treturn text\n\n\t@classmethod\n\tdef backup_id_list(cls, backup_ids: Iterable[Any], **kwargs) -> 
RTextBase:\n\t\treturn RTextList(\n\t\t\t'[',\n\t\t\tRTextBase.join(', ', [cls.backup_id(backup_id, **kwargs) for backup_id in backup_ids]),\n\t\t\t']',\n\t\t)\n\n\t@classmethod\n\tdef backup_size(cls, backup_or_blob_list_summary: Union[BackupInfo, BlobListSummary], *, ndigits: int = 2) -> RTextBase:\n\t\tb = backup_or_blob_list_summary\n\t\treturn cls.file_size(b.raw_size, ndigits=ndigits).h(cls.dual_size_hover(b.raw_size, b.stored_size))\n\n\t@classmethod\n\tdef blob_list_summary_store_size(cls, bls: BlobListSummary) -> RTextBase:\n\t\treturn cls.file_size(bls.raw_size).h(cls.dual_size_hover(bls.raw_size, bls.stored_size))\n\n\t@classmethod\n\tdef boolean(cls, value: bool) -> RTextBase:\n\t\treturn RText(str(value).lower(), RColor.green if value else RColor.red)\n\n\t@classmethod\n\tdef command(cls, s: str, *, color: RColor = RColor.gray, suggest: bool = False, run: bool = False, raw: bool = False) -> RTextBase:\n\t\tcmd = s if raw else mkcmd(s)\n\t\ttext = RText(cmd, color)\n\t\tif suggest:\n\t\t\ttext.h(cls.tr('command.suggest', cmd)).c(RAction.suggest_command, cmd)\n\t\telif run:\n\t\t\ttext.h(cls.tr('command.run', cmd)).c(RAction.run_command, cmd)\n\t\treturn text\n\n\t@classmethod\n\tdef compress_method(cls, compress_method: Union[str, CompressMethod]) -> RTextBase:\n\t\tif isinstance(compress_method, CompressMethod):\n\t\t\tcompress_method = compress_method.name\n\t\treturn RText(compress_method, RColor.light_purple)\n\n\t@classmethod\n\tdef confirm_hint(cls, what: RTextBase, time_wait_text: Any):\n\t\treturn cls.tr(\n\t\t\t'confirm_hint.base',\n\t\t\ttime_wait_text,\n\t\t\tclick_and_run(\n\t\t\t\tRTextList(cls.tr('confirm_hint.confirm', what), '√').set_color(RColor.yellow),\n\t\t\t\tcls.tr('confirm_hint.confirm.hover', cls.command('confirm'), what),\n\t\t\t\tmkcmd('confirm'),\n\t\t\t),\n\t\t\tclick_and_run(\n\t\t\t\tRTextList(cls.tr('confirm_hint.abort', what), '×').set_color(RColor.gold),\n\t\t\t\tcls.tr('confirm_hint.abort.hover', cls.command('abort'), what),\n\t\t\t\tmkcmd('abort'),\n\t\t\t),\n\t\t)\n\n\t@classmethod\n\tdef crontab(cls, crontab_str: str) -> RTextBase:\n\t\turl = 'https://crontab.guru/#' + crontab_str.replace(' ', '_')\n\t\treturn RText(crontab_str, TextColors.date).h(cls.tr('crontab.help_url', cls.url(url, click=False))).c(RAction.open_url, url)\n\n\t@classmethod\n\tdef date_diff(cls, date: datetime.datetime) -> RTextBase:\n\t\tnow = datetime.datetime.now(date.tzinfo)\n\t\tdiff = (date - now).total_seconds()\n\t\tif diff >= 0:\n\t\t\treturn cls.tr('date_diff.later', cls.duration(diff))\n\t\telse:\n\t\t\treturn cls.tr('date_diff.ago', cls.duration(-diff))\n\n\t@classmethod\n\tdef date(cls, date: Union[datetime.datetime, int]) -> RTextBase:\n\t\tif isinstance(date, int):\n\t\t\tdate = conversion_utils.timestamp_to_local_date(date)\n\t\treturn RText(conversion_utils.datetime_to_str(date), TextColors.date).h(cls.date_diff(date))\n\n\t@classmethod\n\tdef dual_size_hover(cls, raw_size: int, stored_size: int, *, ndigits: int = 2) -> RTextBase:\n\t\tt_raw_size = cls.file_size(raw_size, ndigits=ndigits)\n\t\tt_stored_size = cls.file_size(stored_size, ndigits=ndigits)\n\t\tt_percent = cls.percent(stored_size, raw_size)\n\t\treturn cls.tr('dual_size_hover', t_stored_size, t_percent, t_raw_size)\n\n\t@classmethod\n\tdef duration(cls, seconds_or_duration: Union[float, Duration], *, color: Optional[RColor] = TextColors.number, ndigits: int = 2) -> RTextBase:\n\t\t# full duration text, e.g. 
\"1 minute\", \"2 hours\"\n\t\tif isinstance(seconds_or_duration, Duration):\n\t\t\tduration = seconds_or_duration\n\t\telif isinstance(seconds_or_duration, (int, float)):\n\t\t\tduration = Duration(seconds_or_duration)\n\t\telse:\n\t\t\traise TypeError(type(seconds_or_duration))\n\t\tvalue, unit = duration.auto_format()\n\t\tplural_suffix = cls.tr('duration.plural_suffix') if value != 1 else ''\n\t\ttext = cls.tr('duration.text', round(value, ndigits), cls.tr('duration.' + unit, plural_suffix))\n\t\tif color is not None:\n\t\t\ttext.set_color(color)\n\t\treturn text\n\n\t@classmethod\n\tdef file_mode(cls, mode: int) -> RTextBase:\n\t\tif stat.S_ISREG(mode):\n\t\t\ttype_flag = '-'\n\t\t\tcolor = RColor.light_purple\n\t\telif stat.S_ISDIR(mode):\n\t\t\ttype_flag = 'd'\n\t\t\tcolor = RColor.blue\n\t\telif stat.S_ISLNK(mode):\n\t\t\ttype_flag = 'l'\n\t\t\tcolor = RColor.aqua\n\t\telse:\n\t\t\ttype_flag = '?'\n\t\t\tcolor = RColor.gray\n\n\t\tpermissions = ''\n\t\tfor i in range(9):\n\t\t\tpermissions += 'rwx'[i % 3] if (mode >> (8 - i)) & 1 == 1 else '-'\n\n\t\treturn RText(type_flag + permissions, color)\n\n\t@classmethod\n\tdef file_name(cls, file_path: Path) -> RTextBase:\n\t\treturn RText(file_path.name, TextColors.file).h(file_path.as_posix())\n\n\t@classmethod\n\tdef file_size(cls, byte_cnt: Union[int, ByteCount], *, ndigits: int = 2, always_sign: bool = False, color: RColor = TextColors.byte_count) -> RTextBase:\n\t\tif not isinstance(byte_cnt, ByteCount):\n\t\t\tbyte_cnt = ByteCount(byte_cnt)\n\t\treturn RText(byte_cnt.auto_str(ndigits=ndigits, always_sign=always_sign), color=color)\n\n\t@classmethod\n\tdef hash_method(cls, hash_method: Union[str, HashMethod]) -> RTextBase:\n\t\tif isinstance(hash_method, HashMethod):\n\t\t\thash_method = hash_method.name\n\t\treturn RText(hash_method, RColor.light_purple)\n\n\t@classmethod\n\tdef number(cls, value: Any) -> RTextBase:\n\t\treturn RText(value, TextColors.number)\n\n\t@classmethod\n\tdef number_list(cls, values: Iterable[Any]) -> RTextBase:\n\t\treturn RTextList(\n\t\t\t'[',\n\t\t\tRTextBase.join(', ', [cls.number(v) for v in values]),\n\t\t\t']',\n\t\t)\n\n\t@classmethod\n\tdef operator(cls, op: Operator) -> RTextBase:\n\t\ttr_key = f'operator.{op.type}'\n\t\tif op.type in ['player', 'command_source', 'unknown']:\n\t\t\treturn cls.tr(tr_key, op.name)\n\t\telif op.type in ['console']:\n\t\t\treturn cls.tr(tr_key)\n\t\telif op.type == constants.PLUGIN_ID:\n\t\t\tfrom prime_backup.mcdr import mcdr_globals\n\t\t\tt_name = cls.tr(tr_key + '.' 
+ op.name)\n\t\t\tif not mcdr_globals.server.has_translation(misc_utils.ensure_type(getattr(t_name, 'translation_key'), str)):\n\t\t\t\tt_name = RText(op.name, styles=RStyle.italic)\n\t\t\treturn RTextList(cls.tr(tr_key), RText('-', RColor.gray), t_name).set_color(RColor.dark_aqua)\n\t\telse:\n\t\t\treturn RText(f'{op.type}:{op.name}')\n\n\t@classmethod\n\tdef percent(cls, value: float, total: float) -> RTextBase:\n\t\tif total != 0:\n\t\t\treturn RText(f'{100 * value / total:.1f}%', RColor.dark_green)\n\t\telse:\n\t\t\treturn RText('N/A', RColor.gray)\n\n\t@classmethod\n\tdef tag_name(cls, tag_name: BackupTagName) -> RTextBase:\n\t\treturn RText(tag_name.name, TextColors.backup_tag).h(tag_name.value.text)\n\n\t@classmethod\n\tdef title(cls, text: Any) -> RTextBase:\n\t\treturn RTextList(RText('======== ', RColor.gray), text, RText(' ========', RColor.gray))\n\n\t@classmethod\n\tdef url(cls, url: str, *, click: bool = True) -> RTextBase:\n\t\ttext = RText(url, RColor.blue, RStyle.underlined)\n\t\tif click:\n\t\t\ttext.c(RAction.open_url, url)\n\t\treturn text" }, { "identifier": "BackupFilter", "path": "prime_backup/types/backup_filter.py", "snippet": "class BackupFilter:\n\tid_start: Optional[int] = None\n\tid_end: Optional[int] = None\n\tcreator: Optional[Operator] = None\n\ttimestamp_start: Optional[int] = None\n\ttimestamp_end: Optional[int] = None\n\ttag_filters: List[BackupTagFilter] = dataclasses.field(default_factory=list)\n\n\tdef filter_pre_restore_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.pre_restore_backup, True, BackupTagFilter.Policy.equals))\n\t\treturn self\n\n\tdef filter_non_pre_restore_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.pre_restore_backup, True, BackupTagFilter.Policy.not_equals))\n\t\treturn self\n\n\tdef filter_non_hidden_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.hidden, True, BackupTagFilter.Policy.not_equals))\n\t\treturn self\n\n\tdef filter_non_protected_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.protected, True, BackupTagFilter.Policy.not_equals))\n\t\treturn self" }, { "identifier": "BackupInfo", "path": "prime_backup/types/backup_info.py", "snippet": "class BackupInfo:\n\tid: int\n\ttimestamp_ns: int\n\tcreator: Operator\n\tcomment: str\n\ttargets: List[str]\n\ttags: BackupTags\n\n\traw_size: int # uncompressed size\n\tstored_size: int # actual size\n\n\tfiles: List['FileInfo']\n\n\[email protected]_property\n\tdef date(self) -> datetime.datetime:\n\t\treturn conversion_utils.timestamp_to_local_date(self.timestamp_ns)\n\n\[email protected]_property\n\tdef date_str(self) -> str:\n\t\treturn conversion_utils.timestamp_to_local_date_str(self.timestamp_ns)\n\n\t@classmethod\n\tdef of(cls, backup: schema.Backup, *, with_files: bool = False) -> 'Self':\n\t\t\"\"\"\n\t\tNotes: should be inside a session\n\t\t\"\"\"\n\t\tfrom prime_backup.types.file_info import FileInfo\n\t\treturn cls(\n\t\t\tid=backup.id,\n\t\t\ttimestamp_ns=backup.timestamp,\n\t\t\tcreator=Operator.of(backup.creator),\n\t\t\tcomment=backup.comment,\n\t\t\ttargets=list(backup.targets),\n\t\t\ttags=BackupTags(backup.tags),\n\t\t\traw_size=backup.file_raw_size_sum or 0,\n\t\t\tstored_size=backup.file_stored_size_sum or 0,\n\t\t\tfiles=list(map(FileInfo.of, backup.files)) if with_files else [],\n\t\t)" }, { "identifier": "BackupTags", "path": "prime_backup/types/backup_tags.py", "snippet": "class 
BackupTags:\n\tdata: 'BackupTagDict'\n\tNONE = object()\n\n\tdef __init__(self, data: Optional['BackupTagDict'] = None):\n\t\tself.data = {}\n\t\tif data is not None:\n\t\t\tself.data.update(data)\n\n\tdef get(self, name: BackupTagName) -> Any:\n\t\treturn self.data.get(name.name) if name.name in self.data else self.NONE\n\n\tdef set(self, name: BackupTagName, value: Any) -> 'BackupTags':\n\t\tself.data[name.name] = misc_utils.ensure_type(value, name.value.type)\n\t\treturn self\n\n\tdef clear(self, name: BackupTagName) -> bool:\n\t\ttry:\n\t\t\tself.data.pop(name.name)\n\t\t\treturn True\n\t\texcept KeyError:\n\t\t\treturn False\n\n\tdef to_dict(self) -> 'BackupTagDict':\n\t\treturn self.data.copy()\n\n\tdef __len__(self) -> int:\n\t\treturn len(self.data)\n\n\tdef items(self):\n\t\treturn self.data.items()\n\n\t# ============ accessors ============\n\n\tdef is_hidden(self) -> bool:\n\t\treturn self.get(BackupTagName.hidden) is True\n\n\tdef is_backup_before_restore(self) -> bool:\n\t\treturn self.get(BackupTagName.pre_restore_backup) is True\n\n\tdef is_protected(self) -> bool:\n\t\treturn self.get(BackupTagName.protected) is True" }, { "identifier": "BackupTagName", "path": "prime_backup/types/backup_tags.py", "snippet": "class BackupTagName(enum.Enum):\n\t# name -> type\n\thidden = BackupTagValue(bool, 'H', RColor.blue)\n\tpre_restore_backup = BackupTagValue(bool, 'R', RColor.yellow)\n\tprotected = BackupTagValue(bool, 'P', RColor.dark_green)" }, { "identifier": "Operator", "path": "prime_backup/types/operator.py", "snippet": "class Operator(NamedTuple):\n\ttype: str\n\tname: str\n\n\t@classmethod\n\tdef pb(cls, what: str) -> 'Operator':\n\t\treturn Operator(constants.PLUGIN_ID, what)\n\n\t@classmethod\n\tdef player(cls, name: str) -> 'Operator':\n\t\treturn Operator('player', name)\n\n\t@classmethod\n\tdef console(cls) -> 'Operator':\n\t\treturn Operator('console', '')\n\n\t@classmethod\n\tdef of(cls, value: Union[str, 'CommandSource']) -> 'Operator':\n\t\tfrom mcdreforged.api.all import CommandSource\n\t\tif isinstance(value, CommandSource):\n\t\t\tif value.is_player:\n\t\t\t\t# noinspection PyUnresolvedReferences\n\t\t\t\treturn cls.player(value.player)\n\t\t\telif value.is_console:\n\t\t\t\treturn cls.console()\n\t\t\telse:\n\t\t\t\treturn Operator('command_source', str(value))\n\t\telif isinstance(value, str):\n\t\t\tif ':' in value:\n\t\t\t\tt, n = value.split(':', 1)\n\t\t\t\treturn Operator(type=t, name=n)\n\t\t\telse:\n\t\t\t\treturn Operator(type='unknown', name=value)\n\t\telse:\n\t\t\traise TypeError(value)\n\n\tdef to_text(self) -> 'RTextBase':\n\t\tfrom prime_backup.mcdr.text_components import TextComponents\n\t\treturn TextComponents.operator(self)\n\n\tdef __str__(self):\n\t\treturn f'{self.type}:{self.name}'\n\n\tdef is_player(self) -> bool:\n\t\treturn self.type == 'player'" }, { "identifier": "PrimeBackupOperatorNames", "path": "prime_backup/types/operator.py", "snippet": "class PrimeBackupOperatorNames:\n\tpre_restore = 'pre_restore'\n\tscheduled_backup = 'scheduled_backup'\n\ttest = 'test'" }, { "identifier": "backup_utils", "path": "prime_backup/utils/backup_utils.py", "snippet": "_PATTERN_WORDS = re.compile(r'\\w+')\n_PATTERN_EXTRACT = re.compile(r'__pb_translated__:(\\w+)')\n_PATTERN_EXTRACT_WITH_ARGS = re.compile(r'__pb_translated__:(\\w+):(.*)')\ndef create_translated_backup_comment(key: str, *args) -> str:\ndef extract_backup_comment_translation_key(comment: str) -> Optional[ExtractResult]:\nclass ExtractResult(NamedTuple):" }, { "identifier": "log_utils", 
"path": "prime_backup/utils/log_utils.py", "snippet": "LOG_FORMATTER = logging.Formatter('[%(asctime)s %(levelname)s] (%(funcName)s) %(message)s')\nLOG_FORMATTER_NO_FUNC = logging.Formatter('[%(asctime)s %(levelname)s] %(message)s')\ndef __get_log_mode() -> int:\ndef __get_log_file_path(file_name: str) -> Path:\ndef create_file_logger(name: str) -> logging.Logger:\ndef open_file_logger(name: str) -> ContextManager[logging.Logger]:" }, { "identifier": "click_and_run", "path": "prime_backup/utils/mcdr_utils.py", "snippet": "def click_and_run(message: Any, text: Any, command: str) -> RTextBase:\n\treturn RTextBase.from_any(message).h(text).c(RAction.run_command, command)" }, { "identifier": "mkcmd", "path": "prime_backup/utils/mcdr_utils.py", "snippet": "def mkcmd(s: str) -> str:\n\tfrom prime_backup.config.config import Config\n\tcmd = Config.get().command.prefix\n\tif len(s) > 0:\n\t\tcmd += ' ' + s\n\treturn cmd" }, { "identifier": "Timer", "path": "prime_backup/utils/timer.py", "snippet": "class Timer:\n\t__start_time: float\n\t__end_time: Optional[float]\n\n\tdef __init__(self):\n\t\tself.start()\n\n\tdef start(self):\n\t\tself.__start_time = _now()\n\t\tself.__end_time = None\n\n\tdef restart(self):\n\t\tself.start()\n\n\tdef stop(self):\n\t\tself.__end_time = _now()\n\n\tdef is_ticking(self) -> bool:\n\t\treturn self.__end_time is None\n\n\tdef get_elapsed(self) -> float:\n\t\treturn _now() - self.__start_time\n\n\tdef get_and_restart(self) -> float:\n\t\tret = self.get_elapsed()\n\t\tself.restart()\n\t\treturn ret" } ]
from typing import Optional from mcdreforged.api.all import * from prime_backup.action.create_backup_action import CreateBackupAction from prime_backup.action.export_backup_action import ExportBackupToDirectoryAction from prime_backup.action.get_backup_action import GetBackupAction from prime_backup.action.list_backup_action import ListBackupAction from prime_backup.mcdr.task.basic_task import HeavyTask from prime_backup.mcdr.text_components import TextComponents from prime_backup.types.backup_filter import BackupFilter from prime_backup.types.backup_info import BackupInfo from prime_backup.types.backup_tags import BackupTags, BackupTagName from prime_backup.types.operator import Operator, PrimeBackupOperatorNames from prime_backup.utils import backup_utils, log_utils from prime_backup.utils.mcdr_utils import click_and_run, mkcmd from prime_backup.utils.timer import Timer
12,221
class RestoreBackupTask(HeavyTask[None]): def __init__(self, source: CommandSource, backup_id: Optional[int] = None, needs_confirm: bool = True, fail_soft: bool = False, verify_blob: bool = True): super().__init__(source) self.backup_id = backup_id self.needs_confirm = needs_confirm self.fail_soft = fail_soft self.verify_blob = verify_blob self.__can_abort = False @property def id(self) -> str: return 'backup_restore' def is_abort_able(self) -> bool: return super().is_abort_able() or self.__can_abort def get_abort_permission(self) -> int: return 0 def __countdown_and_stop_server(self, backup: BackupInfo) -> bool: for countdown in range(max(0, self.config.command.restore_countdown_sec), 0, -1): self.broadcast(click_and_run( RText('!!! ', RColor.red) + self.tr('countdown', countdown, TextComponents.backup_brief(backup, backup_id_fancy=False)), self.tr('countdown.hover', TextComponents.command('abort')), mkcmd('abort'), )) if self.aborted_event.wait(1): self.broadcast(self.get_aborted_text()) return False self.server.stop() self.logger.info('Wait for server to stop') self.server.wait_until_stop() return True def run(self): if self.backup_id is None: backup_filter = BackupFilter() backup_filter.filter_non_pre_restore_backup() candidates = ListBackupAction(backup_filter=backup_filter, limit=1).run() if len(candidates) == 0: self.reply_tr('no_backup') return backup = candidates[0] else: backup = GetBackupAction(self.backup_id).run() self.__can_abort = True self.broadcast(self.tr('show_backup', TextComponents.backup_brief(backup))) if self.needs_confirm: if not self.wait_confirm(self.tr('confirm_target')): return server_was_running = self.server.is_server_running() if server_was_running: if not self.__countdown_and_stop_server(backup): return else: self.logger.info('Found an already-stopped server') self.__can_abort = False
class RestoreBackupTask(HeavyTask[None]): def __init__(self, source: CommandSource, backup_id: Optional[int] = None, needs_confirm: bool = True, fail_soft: bool = False, verify_blob: bool = True): super().__init__(source) self.backup_id = backup_id self.needs_confirm = needs_confirm self.fail_soft = fail_soft self.verify_blob = verify_blob self.__can_abort = False @property def id(self) -> str: return 'backup_restore' def is_abort_able(self) -> bool: return super().is_abort_able() or self.__can_abort def get_abort_permission(self) -> int: return 0 def __countdown_and_stop_server(self, backup: BackupInfo) -> bool: for countdown in range(max(0, self.config.command.restore_countdown_sec), 0, -1): self.broadcast(click_and_run( RText('!!! ', RColor.red) + self.tr('countdown', countdown, TextComponents.backup_brief(backup, backup_id_fancy=False)), self.tr('countdown.hover', TextComponents.command('abort')), mkcmd('abort'), )) if self.aborted_event.wait(1): self.broadcast(self.get_aborted_text()) return False self.server.stop() self.logger.info('Wait for server to stop') self.server.wait_until_stop() return True def run(self): if self.backup_id is None: backup_filter = BackupFilter() backup_filter.filter_non_pre_restore_backup() candidates = ListBackupAction(backup_filter=backup_filter, limit=1).run() if len(candidates) == 0: self.reply_tr('no_backup') return backup = candidates[0] else: backup = GetBackupAction(self.backup_id).run() self.__can_abort = True self.broadcast(self.tr('show_backup', TextComponents.backup_brief(backup))) if self.needs_confirm: if not self.wait_confirm(self.tr('confirm_target')): return server_was_running = self.server.is_server_running() if server_was_running: if not self.__countdown_and_stop_server(backup): return else: self.logger.info('Found an already-stopped server') self.__can_abort = False
timer = Timer()
16
2023-11-28 19:03:36+00:00
16k
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n def __init__(self, headers=None, **kwargs):\n super(HTTPHeaderDict, self).__init__()\n self._container = OrderedDict()\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key, val):\n self._container[key.lower()] = [key, val]\n return self._container[key.lower()]\n\n def __getitem__(self, key):\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key):\n del self._container[key.lower()]\n\n def __contains__(self, key):\n return key.lower() in self._container\n\n def __eq__(self, other):\n if not isinstance(other, Mapping) and not hasattr(other, \"keys\"):\n return False\n if not isinstance(other, type(self)):\n other = type(self)(other)\n return dict((k.lower(), v) for k, v in self.itermerged()) == dict(\n (k.lower(), v) for k, v in other.itermerged()\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n if six.PY2: # Python 2\n iterkeys = MutableMapping.iterkeys\n itervalues = MutableMapping.itervalues\n\n __marker = object()\n\n def __len__(self):\n return len(self._container)\n\n def __iter__(self):\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def pop(self, key, default=__marker):\n \"\"\"D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n \"\"\"\n # Using the MutableMapping function directly fails due to the private marker.\n # Using ordinary dict.pop would expose the internal structures.\n # So let's reinvent the wheel.\n try:\n value = self[key]\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n del self[key]\n return value\n\n def discard(self, key):\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key, val):\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n \"\"\"\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = 
self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n vals.append(val)\n\n def extend(self, *args, **kwargs):\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n \"extend() takes at most 1 positional \"\n \"arguments ({0} given)\".format(len(args))\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, Mapping):\n for key in other:\n self.add(key, other[key])\n elif hasattr(other, \"keys\"):\n for key in other.keys():\n self.add(key, other[key])\n else:\n for key, value in other:\n self.add(key, value)\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n def getlist(self, key, default=__marker):\n \"\"\"Returns a list of all the values for the named field. Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is self.__marker:\n return []\n return default\n else:\n return vals[1:]\n\n def _prepare_for_method_change(self):\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, dict(self.itermerged()))\n\n def _copy_from(self, other):\n for key in other:\n val = other.getlist(key)\n if isinstance(val, list):\n # Don't need to convert tuples\n val = list(val)\n self._container[key.lower()] = [key] + val\n\n def copy(self):\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self):\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self):\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self):\n return list(self.iteritems())\n\n @classmethod\n def from_httplib(cls, message): # Python 2\n \"\"\"Read headers from a Python 2 httplib message object.\"\"\"\n # python2.7 does not expose a proper API for exporting multiheaders\n # efficiently. This function re-reads raw lines from the message\n # object and extracts the multiheaders properly.\n obs_fold_continued_leaders = (\" \", \"\\t\")\n headers = []\n\n for line in message.headers:\n if line.startswith(obs_fold_continued_leaders):\n if not headers:\n # We received a header line that starts with OWS as described\n # in RFC-7230 S3.2.4. 
This indicates a multiline header, but\n # there exists no previous header to which we can attach it.\n raise InvalidHeader(\n \"Header continuation with no previous header: %s\" % line\n )\n else:\n key, value = headers[-1]\n headers[-1] = (key, value + \" \" + line.strip())\n continue\n\n key, value = line.split(\":\", 1)\n headers.append((key, value.strip()))\n\n return cls(headers)" }, { "identifier": "RecentlyUsedContainer", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(MutableMapping):\n \"\"\"\n Provides a thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n ContainerCls = OrderedDict\n\n def __init__(self, maxsize=10, dispose_func=None):\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n\n self._container = self.ContainerCls()\n self.lock = RLock()\n\n def __getitem__(self, key):\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key, value):\n evicted_value = _Null\n with self.lock:\n # Possibly evict the existing value of 'key'\n evicted_value = self._container.get(key, _Null)\n self._container[key] = value\n\n # If we didn't evict an existing value, we might have to evict the\n # least recently used item from the beginning of the container.\n if len(self._container) > self._maxsize:\n _key, evicted_value = self._container.popitem(last=False)\n\n if self.dispose_func and evicted_value is not _Null:\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key):\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self):\n with self.lock:\n return len(self._container)\n\n def __iter__(self):\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self):\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(itervalues(self._container))\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self):\n with self.lock:\n return list(iterkeys(self._container))" }, { "identifier": "HTTPConnectionPool", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/connectionpool.py", "snippet": "class ConnectionPool(object):\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host, port=None):\n def __str__(self):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def close(self):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n _proxy_config=None,\n **conn_kw\n ):\n def _new_conn(self):\n def _get_conn(self, timeout=None):\n def _put_conn(self, conn):\n def _validate_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _get_timeout(self, timeout):\n def _raise_timeout(self, err, url, timeout_value):\n def _make_request(\n self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw\n 
):\n def _absolute_url(self, path):\n def close(self):\n def is_same_host(self, url):\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n assert_same_host=True,\n timeout=_Default,\n pool_timeout=None,\n release_conn=None,\n chunked=False,\n body_pos=None,\n **response_kw\n ):\n def _is_ssl_error_message_from_http_proxy(ssl_error):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n ssl_version=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n **conn_kw\n ):\n def _prepare_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _new_conn(self):\n def _validate_conn(self, conn):\ndef connection_from_url(url, **kw):\ndef _normalize_host(host, scheme):\ndef _close_pool_connections(pool):" }, { "identifier": "LocationValueError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"\n\n pass" }, { "identifier": "MaxRetryError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param string url: The requested Url\n :param exceptions.Exception reason: The underlying error\n\n \"\"\"\n\n def __init__(self, pool, url, reason=None):\n self.reason = reason\n\n message = \"Max retries exceeded with url: %s (Caused by %r)\" % (url, reason)\n\n RequestError.__init__(self, pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme):\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = (\n \"Proxy URL had unsupported scheme %s, should use http:// or https://\"\n % scheme\n )\n super(ProxySchemeUnknown, self).__init__(message)" }, { "identifier": "ProxySchemeUnsupported", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnsupported(ValueError):\n \"\"\"Fetching HTTPS resources through HTTPS proxies is unsupported\"\"\"\n\n pass" }, { "identifier": "URLSchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme):\n message = \"Not supported URL scheme %s\" % scheme\n super(URLSchemeUnknown, self).__init__(message)\n\n self.scheme = scheme" }, { "identifier": "six", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/packages/six.py", "snippet": "PY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] 
>= (3, 4)\n MAXSIZE = sys.maxsize\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 63) - 1)\n class X(object):\nclass _LazyDescr(object):\nclass MovedModule(_LazyDescr):\nclass _LazyModule(types.ModuleType):\nclass MovedAttribute(_LazyDescr):\nclass _SixMetaPathImporter(object):\nclass _MovedItems(_LazyModule):\nclass Module_six_moves_urllib_parse(_LazyModule):\nclass Module_six_moves_urllib_error(_LazyModule):\nclass Module_six_moves_urllib_request(_LazyModule):\nclass Module_six_moves_urllib_response(_LazyModule):\nclass Module_six_moves_urllib_robotparser(_LazyModule):\nclass Module_six_moves_urllib(types.ModuleType):\n class Iterator(object):\n class metaclass(type):\n def __len__(self):\ndef _add_doc(func, doc):\ndef _import_module(name):\n def __init__(self, name):\n def __get__(self, obj, tp):\n def __init__(self, name, old, new=None):\n def _resolve(self):\n def __getattr__(self, attr):\n def __init__(self, name):\n def __dir__(self):\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n def _resolve(self):\n def __init__(self, six_module_name):\n def _add_module(self, mod, *fullnames):\n def _get_module(self, fullname):\n def find_module(self, fullname, path=None):\n def find_spec(self, fullname, path, target=None):\n def __get_module(self, fullname):\n def load_module(self, fullname):\n def is_package(self, fullname):\n def get_code(self, fullname):\n def create_module(self, spec):\n def exec_module(self, module):\n def __dir__(self):\ndef add_move(move):\ndef remove_move(name):\n def advance_iterator(it):\n def callable(obj):\n def get_unbound_function(unbound):\n def create_unbound_method(func, cls):\n def get_unbound_function(unbound):\n def create_bound_method(func, obj):\n def create_unbound_method(func, cls):\n def next(self):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def b(s):\n def u(s):\n def b(s):\n def u(s):\n def byte2int(bs):\n def indexbytes(buf, i):\ndef assertCountEqual(self, *args, **kwargs):\ndef assertRaisesRegex(self, *args, **kwargs):\ndef assertRegex(self, *args, **kwargs):\ndef assertNotRegex(self, *args, **kwargs):\n def reraise(tp, value, tb=None):\n def exec_(_code_, _globs_=None, _locs_=None):\n def raise_from(value, from_value):\n def print_(*args, **kwargs):\n def write(data):\n def print_(*args, **kwargs):\n def _update_wrapper(\n wrapper,\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\n def wraps(\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\ndef with_metaclass(meta, *bases):\n def __new__(cls, name, this_bases, d):\n def __prepare__(cls, name, this_bases):\ndef add_metaclass(metaclass):\n def wrapper(cls):\ndef ensure_binary(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_text(s, encoding=\"utf-8\", errors=\"strict\"):\ndef python_2_unicode_compatible(klass):" }, { "identifier": "RequestMethods", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/request.py", "snippet": "class RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of 
request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **kw\n ): # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n urlopen_kw[\"request_url\"] = url\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method,\n url,\n fields=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **urlopen_kw\n ):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. 
For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": {}}\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields),\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"] = {\"Content-Type\": content_type}\n\n extra_kw[\"headers\"].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "connection_requires_http_tunnel", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url=None, proxy_config=None, destination_scheme=None\n):\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. (i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/retry.py", "snippet": "class Retry(object):\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool::\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request('GET', 'http://example.com/')\n\n Or per-request (which overrides the default for the pool)::\n\n response = http.request('GET', 'http://example.com/', retries=Retry(10))\n\n Retries can be disabled by passing ``False``::\n\n response = http.request('GET', 'http://example.com/', retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. 
Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param iterable allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``False`` value to retry on any verb.\n\n .. warning::\n\n Previously this parameter was named ``method_whitelist``, that\n usage is deprecated in v1.26.0 and will be removed in v2.0.\n\n :param iterable status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of total retries} - 1))\n\n seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep\n for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer\n than :attr:`Retry.DEFAULT_BACKOFF_MAX`.\n\n By default, backoff is disabled (set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. 
The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param iterable remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n def __init__(\n self,\n total=10,\n connect=None,\n read=None,\n redirect=None,\n status=None,\n other=None,\n allowed_methods=_Default,\n status_forcelist=None,\n backoff_factor=0,\n raise_on_redirect=True,\n raise_on_status=True,\n history=None,\n respect_retry_after_header=True,\n remove_headers_on_redirect=_Default,\n # TODO: Deprecated, remove in v2.0\n method_whitelist=_Default,\n ):\n\n if method_whitelist is not _Default:\n if allowed_methods is not _Default:\n raise ValueError(\n \"Using both 'allowed_methods' and \"\n \"'method_whitelist' together is not allowed. \"\n \"Instead only use 'allowed_methods'\"\n )\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n allowed_methods = method_whitelist\n if allowed_methods is _Default:\n allowed_methods = self.DEFAULT_ALLOWED_METHODS\n if remove_headers_on_redirect is _Default:\n remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or tuple()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n [h.lower() for h in remove_headers_on_redirect]\n )\n\n def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n )\n\n # TODO: If already given in **kw we use what's given to us\n # If not given we need to figure out what to pass. We decide\n # based on whether our class has the 'method_whitelist' property\n # and if so we pass the deprecated 'method_whitelist' otherwise\n # we use 'allowed_methods'. 
Remove in v2.0\n if \"method_whitelist\" not in kw and \"allowed_methods\" not in kw:\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n params[\"method_whitelist\"] = self.allowed_methods\n else:\n params[\"allowed_methods\"] = self.allowed_methods\n\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect=True, default=None):\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n return min(self.DEFAULT_BACKOFF_MAX, backoff_value)\n\n def parse_retry_after(self, retry_after):\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(\"Invalid Retry-After header: %s\" % retry_after)\n if retry_date_tuple[9] is None: # Python 2\n # Assume UTC if no timezone was specified\n # On Python2.7, parsedate_tz returns None for a timezone offset\n # instead of 0 if no timezone is given, where mktime_tz treats\n # a None timezone offset as local time.\n retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n if seconds < 0:\n seconds = 0\n\n return seconds\n\n def get_retry_after(self, response):\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response=None):\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response=None):\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. 
By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err):\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method):\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n # TODO: For now favor if the Retry implementation sets its own method_whitelist\n # property outside of our constructor to avoid breaking custom implementations.\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n allowed_methods = self.method_whitelist\n else:\n allowed_methods = self.allowed_methods\n\n if allowed_methods and method.upper() not in allowed_methods:\n return False\n return True\n\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\"Is this method/status code retryable? (Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return (\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self):\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method=None,\n url=None,\n response=None,\n error=None,\n _pool=None,\n _stacktrace=None,\n ):\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.HTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise six.reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n 
elif error and self._is_read_error(error):\n # Read retry?\n if read is False or not self._is_method_retryable(method):\n raise six.reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n redirect_location = response.get_redirect_location()\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n\n return new_retry\n\n def __repr__(self):\n return (\n \"{cls.__name__}(total={self.total}, connect={self.connect}, \"\n \"read={self.read}, redirect={self.redirect}, status={self.status})\"\n ).format(cls=type(self), self=self)\n\n def __getattr__(self, item):\n if item == \"method_whitelist\":\n # TODO: Remove this deprecated alias in v2.0\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n return self.allowed_methods\n try:\n return getattr(super(Retry, self), item)\n except AttributeError:\n return getattr(Retry, item)" }, { "identifier": "parse_url", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/url.py", "snippet": "def parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. 
Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not SCHEME_RE.search(url):\n url = \"//\" + url\n\n try:\n scheme, authority, path, query, fragment = URI_RE.match(url).groups()\n normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups()\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port = int(port)\n if not (0 <= port <= 65535):\n raise LocationParseError(url)\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)\n\n except (ValueError, AttributeError):\n return six.raise_from(LocationParseError(source_url), None)\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n # Ensure that each part of the URL is a `str` for\n # backwards compatibility.\n if isinstance(url, six.text_type):\n ensure_func = six.ensure_text\n else:\n ensure_func = six.ensure_str\n\n def ensure_type(x):\n return x if x is None else ensure_func(x)\n\n return Url(\n scheme=ensure_type(scheme),\n auth=ensure_type(auth),\n host=ensure_type(host),\n port=port,\n path=ensure_type(path),\n query=ensure_type(query),\n fragment=ensure_type(fragment),\n )" } ]
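The RequestMethods snippet in the context list above distinguishes URL-encoded fields (GET, HEAD, DELETE, OPTIONS) from body-encoded fields (POST, PUT, PATCH). A minimal sketch of the two encoding paths, assuming urllib3 is installed; the URL and field names are illustrative and no network request is made:

from urllib.parse import urlencode

from urllib3 import encode_multipart_formdata

# request_encode_url path: fields are appended to the URL as a query string.
get_url = "http://example.com/search?" + urlencode({"q": "urllib3"})

# request_encode_body path with encode_multipart=True: key/filetuple values
# follow the (filename, data[, MIME type]) convention from the docstring.
fields = {
    "foo": "bar",
    "fakefile": ("foofile.txt", "contents of foofile"),
}
body, content_type = encode_multipart_formdata(fields)

print(get_url)
print(content_type)  # multipart/form-data with a random boundary string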
import collections
import functools
import logging

from ._collections import HTTPHeaderDict, RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
    LocationValueError,
    MaxRetryError,
    ProxySchemeUnknown,
    ProxySchemeUnsupported,
    URLSchemeUnknown,
)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.url import parse_url
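Retry (imported above) documents the backoff schedule {backoff factor} * (2 ** ({number of total retries} - 1)), capped at DEFAULT_BACKOFF_MAX. A self-contained sketch of that formula in plain Python, not calling urllib3; the 120-second cap mirrors Retry.DEFAULT_BACKOFF_MAX:

DEFAULT_BACKOFF_MAX = 120  # mirrors Retry.DEFAULT_BACKOFF_MAX

def backoff_time(backoff_factor, consecutive_errors):
    # get_backoff_time() returns 0 until there is more than one consecutive error.
    if consecutive_errors <= 1:
        return 0.0
    return min(DEFAULT_BACKOFF_MAX, backoff_factor * (2 ** (consecutive_errors - 1)))

# With backoff_factor=0.1 the sleeps grow as 0.0s, 0.2s, 0.4s, 0.8s, 1.6s, ...
print([backoff_time(0.1, n) for n in range(1, 6)])  # [0.0, 0.2, 0.4, 0.8, 1.6]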
14,158
if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port:
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. """ if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port:
port = port_by_scheme.get(request_context["scheme"].lower(), 80)
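The gold next line above supplies a default port when none was given. A standalone sketch of that fallback; port_by_scheme is replaced here by a small stand-in dict (the real mapping lives in urllib3.connectionpool):

port_by_scheme = {"http": 80, "https": 443}  # stand-in for urllib3.connectionpool.port_by_scheme

def resolve_port(scheme, port=None):
    # Mirrors the completed line: an explicit port wins, otherwise the scheme
    # default, with 80 as the last resort for unknown schemes.
    if not port:
        port = port_by_scheme.get(scheme.lower(), 80)
    return port

assert resolve_port("HTTPS") == 443
assert resolve_port("http", 8080) == 8080
assert resolve_port("socks5") == 80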
2
2023-11-27 07:01:39+00:00
16k
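The poolmanager code in this record builds hashable pool keys by lower-casing scheme and host, freezing dict-valued options, prefixing every field with key_, and filling the remaining namedtuple fields with None. A condensed sketch of that normalization, using a hypothetical shortened key with only four fields:

import collections

# Hypothetical, shortened stand-in for PoolKey; the real namedtuple has many more fields.
MiniPoolKey = collections.namedtuple(
    "MiniPoolKey", ["key_scheme", "key_host", "key_port", "key_headers"]
)

def normalize_key(request_context):
    context = request_context.copy()  # copy first, since the dict is mutated
    context["scheme"] = context["scheme"].lower()
    context["host"] = context["host"].lower()
    if context.get("headers") is not None:
        context["headers"] = frozenset(context["headers"].items())  # make hashable
    # namedtuple fields cannot start with '_', hence the 'key_' prefix.
    context = {"key_" + k: v for k, v in context.items()}
    for field in MiniPoolKey._fields:
        context.setdefault(field, None)  # missing fields default to None
    return MiniPoolKey(**context)

key = normalize_key({"scheme": "HTTPS", "host": "Example.COM", "headers": {"x-a": "1"}})
print(key.key_scheme, key.key_host, key.key_port)  # https example.com None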
TACJu/MaXTron
MaXTron_Tube-Link/mmdet/datasets/coco_panoptic.py
[ { "identifier": "COCO", "path": "MaXTron_Tube-Link/mmdet/datasets/api_wrappers/coco_api.py", "snippet": "class COCO(_COCO):\n \"\"\"This class is almost the same as official pycocotools package.\n\n It implements some snake case function aliases. So that the COCO class has\n the same interface as LVIS class.\n \"\"\"\n\n def __init__(self, annotation_file=None):\n if getattr(pycocotools, '__version__', '0') >= '12.0.2':\n warnings.warn(\n 'mmpycocotools is deprecated. Please install official pycocotools by \"pip install pycocotools\"', # noqa: E501\n UserWarning)\n super().__init__(annotation_file=annotation_file)\n self.img_ann_map = self.imgToAnns\n self.cat_img_map = self.catToImgs\n\n def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):\n return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)\n\n def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):\n return self.getCatIds(cat_names, sup_names, cat_ids)\n\n def get_img_ids(self, img_ids=[], cat_ids=[]):\n return self.getImgIds(img_ids, cat_ids)\n\n def load_anns(self, ids):\n return self.loadAnns(ids)\n\n def load_cats(self, ids):\n return self.loadCats(ids)\n\n def load_imgs(self, ids):\n return self.loadImgs(ids)" }, { "identifier": "pq_compute_multi_core", "path": "MaXTron_Tube-Link/mmdet/datasets/api_wrappers/panoptic_evaluation.py", "snippet": "def pq_compute_multi_core(matched_annotations_list,\n gt_folder,\n pred_folder,\n categories,\n file_client=None,\n nproc=32):\n \"\"\"Evaluate the metrics of Panoptic Segmentation with multithreading.\n\n Same as the function with the same name in `panopticapi`.\n\n Args:\n matched_annotations_list (list): The matched annotation list. Each\n element is a tuple of annotations of the same image with the\n format (gt_anns, pred_anns).\n gt_folder (str): The path of the ground truth images.\n pred_folder (str): The path of the prediction images.\n categories (str): The categories of the dataset.\n file_client (object): The file client of the dataset. If None,\n the backend will be set to `disk`.\n nproc (int): Number of processes for panoptic quality computing.\n Defaults to 32. 
When `nproc` exceeds the number of cpu cores,\n the number of cpu cores is used.\n \"\"\"\n if PQStat is None:\n raise RuntimeError(\n 'panopticapi is not installed, please install it by: '\n 'pip install git+https://github.com/cocodataset/'\n 'panopticapi.git.')\n\n if file_client is None:\n file_client_args = dict(backend='disk')\n file_client = mmcv.FileClient(**file_client_args)\n\n cpu_num = min(nproc, multiprocessing.cpu_count())\n\n annotations_split = np.array_split(matched_annotations_list, cpu_num)\n print('Number of cores: {}, images per core: {}'.format(\n cpu_num, len(annotations_split[0])))\n workers = multiprocessing.Pool(processes=cpu_num)\n processes = []\n for proc_id, annotation_set in enumerate(annotations_split):\n p = workers.apply_async(pq_compute_single_core,\n (proc_id, annotation_set, gt_folder,\n pred_folder, categories, file_client))\n processes.append(p)\n\n # Close the process pool, otherwise it will lead to memory\n # leaking problems.\n workers.close()\n workers.join()\n\n pq_stat = PQStat()\n for p in processes:\n pq_stat += p.get()\n\n return pq_stat" }, { "identifier": "DATASETS", "path": "MaXTron_Tube-Link/mmdet/datasets/builder.py", "snippet": "DATASETS = Registry('dataset')" }, { "identifier": "CocoDataset", "path": "MaXTron_Tube-Link/mmdet/datasets/coco.py", "snippet": "class CocoDataset(CustomDataset):\n\n CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',\n 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',\n 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',\n 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',\n 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')\n\n PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230),\n (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70),\n (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0),\n (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255),\n (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157),\n (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118),\n (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182),\n (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255),\n (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255),\n (134, 134, 103), (145, 148, 174), (255, 208, 186),\n (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255),\n (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105),\n (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149),\n (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205),\n (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0),\n (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88),\n (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118),\n (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15),\n (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0),\n (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122),\n (191, 162, 208)]\n\n def 
load_annotations(self, ann_file):\n \"\"\"Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n \"\"\"\n\n self.coco = COCO(ann_file)\n # The order of returned `cat_ids` will not\n # change with the order of the CLASSES\n self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.img_ids = self.coco.get_img_ids()\n data_infos = []\n total_ann_ids = []\n for i in self.img_ids:\n info = self.coco.load_imgs([i])[0]\n info['filename'] = info['file_name']\n data_infos.append(info)\n ann_ids = self.coco.get_ann_ids(img_ids=[i])\n total_ann_ids.extend(ann_ids)\n assert len(set(total_ann_ids)) == len(\n total_ann_ids), f\"Annotation ids in '{ann_file}' are not unique!\"\n return data_infos\n\n def get_ann_info(self, idx):\n \"\"\"Get COCO annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n \"\"\"\n\n img_id = self.data_infos[idx]['id']\n ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n ann_info = self.coco.load_anns(ann_ids)\n return self._parse_ann_info(self.data_infos[idx], ann_info)\n\n def get_cat_ids(self, idx):\n \"\"\"Get COCO category ids by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n \"\"\"\n\n img_id = self.data_infos[idx]['id']\n ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n ann_info = self.coco.load_anns(ann_ids)\n return [ann['category_id'] for ann in ann_info]\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small or without ground truths.\"\"\"\n valid_inds = []\n # obtain images that contain annotation\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n # obtain images that contain annotations of the required categories\n ids_in_cat = set()\n for i, class_id in enumerate(self.cat_ids):\n ids_in_cat |= set(self.coco.cat_img_map[class_id])\n # merge the image id sets of the two conditions and use the merged set\n # to filter out images if self.filter_empty_gt=True\n ids_in_cat &= ids_with_ann\n\n valid_img_ids = []\n for i, img_info in enumerate(self.data_infos):\n img_id = self.img_ids[i]\n if self.filter_empty_gt and img_id not in ids_in_cat:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n valid_img_ids.append(img_id)\n self.img_ids = valid_img_ids\n return valid_inds\n\n def _parse_ann_info(self, img_info, ann_info):\n \"\"\"Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\\\n labels, masks, seg_map. 
\"masks\" are raw annotations and not \\\n decoded into binary masks.\n \"\"\"\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))\n inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))\n if inter_w * inter_h == 0:\n continue\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n if ann['category_id'] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_masks_ann.append(ann.get('segmentation', None))\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info['filename'].replace('jpg', 'png')\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=seg_map)\n\n return ann\n\n def xyxy2xywh(self, bbox):\n \"\"\"Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n evaluation.\n\n Args:\n bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n ``xyxy`` order.\n\n Returns:\n list[float]: The converted bounding boxes, in ``xywh`` order.\n \"\"\"\n\n _bbox = bbox.tolist()\n return [\n _bbox[0],\n _bbox[1],\n _bbox[2] - _bbox[0],\n _bbox[3] - _bbox[1],\n ]\n\n def _proposal2json(self, results):\n \"\"\"Convert proposal results to COCO json style.\"\"\"\n json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n bboxes = results[idx]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = 1\n json_results.append(data)\n return json_results\n\n def _det2json(self, results):\n \"\"\"Convert detection results to COCO json style.\"\"\"\n json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n result = results[idx]\n for label in range(len(result)):\n bboxes = result[label]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = self.cat_ids[label]\n json_results.append(data)\n return json_results\n\n def _segm2json(self, results):\n \"\"\"Convert instance segmentation results to COCO json style.\"\"\"\n bbox_json_results = []\n segm_json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n det, seg = results[idx]\n for label in range(len(det)):\n # bbox results\n bboxes = det[label]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = self.cat_ids[label]\n bbox_json_results.append(data)\n\n # segm results\n # some detectors use different scores for bbox and mask\n if isinstance(seg, tuple):\n segms = seg[0][label]\n mask_score = seg[1][label]\n else:\n segms = seg[label]\n mask_score = [bbox[4] for bbox in bboxes]\n for i in range(bboxes.shape[0]):\n data = dict()\n 
data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(mask_score[i])\n data['category_id'] = self.cat_ids[label]\n if isinstance(segms[i]['counts'], bytes):\n segms[i]['counts'] = segms[i]['counts'].decode()\n data['segmentation'] = segms[i]\n segm_json_results.append(data)\n return bbox_json_results, segm_json_results\n\n def results2json(self, results, outfile_prefix):\n \"\"\"Dump the detection results to a COCO style json file.\n\n There are 3 types of results: proposals, bbox predictions, mask\n predictions, and they have different data types. This method will\n automatically recognize the type, and dump them to json files.\n\n Args:\n results (list[list | tuple | ndarray]): Testing results of the\n dataset.\n outfile_prefix (str): The filename prefix of the json files. If the\n prefix is \"somepath/xxx\", the json files will be named\n \"somepath/xxx.bbox.json\", \"somepath/xxx.segm.json\",\n \"somepath/xxx.proposal.json\".\n\n Returns:\n dict[str: str]: Possible keys are \"bbox\", \"segm\", \"proposal\", and \\\n values are corresponding filenames.\n \"\"\"\n result_files = dict()\n if isinstance(results[0], list):\n json_results = self._det2json(results)\n result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n mmcv.dump(json_results, result_files['bbox'])\n elif isinstance(results[0], tuple):\n json_results = self._segm2json(results)\n result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n result_files['segm'] = f'{outfile_prefix}.segm.json'\n mmcv.dump(json_results[0], result_files['bbox'])\n mmcv.dump(json_results[1], result_files['segm'])\n elif isinstance(results[0], np.ndarray):\n json_results = self._proposal2json(results)\n result_files['proposal'] = f'{outfile_prefix}.proposal.json'\n mmcv.dump(json_results, result_files['proposal'])\n else:\n raise TypeError('invalid type of results')\n return result_files\n\n def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):\n gt_bboxes = []\n for i in range(len(self.img_ids)):\n ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])\n ann_info = self.coco.load_anns(ann_ids)\n if len(ann_info) == 0:\n gt_bboxes.append(np.zeros((0, 4)))\n continue\n bboxes = []\n for ann in ann_info:\n if ann.get('ignore', False) or ann['iscrowd']:\n continue\n x1, y1, w, h = ann['bbox']\n bboxes.append([x1, y1, x1 + w, y1 + h])\n bboxes = np.array(bboxes, dtype=np.float32)\n if bboxes.shape[0] == 0:\n bboxes = np.zeros((0, 4))\n gt_bboxes.append(bboxes)\n\n recalls = eval_recalls(\n gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)\n ar = recalls.mean(axis=1)\n return ar\n\n def format_results(self, results, jsonfile_prefix=None, **kwargs):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. 
Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing \\\n the json filepaths, tmp_dir is the temporal directory created \\\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n result_files = self.results2json(results, jsonfile_prefix)\n return result_files, tmp_dir\n\n def evaluate_det_segm(self,\n results,\n result_files,\n coco_gt,\n metrics,\n logger=None,\n classwise=False,\n proposal_nums=(100, 300, 1000),\n iou_thrs=None,\n metric_items=None):\n \"\"\"Instance segmentation and object detection evaluation in COCO\n protocol.\n\n Args:\n results (list[list | tuple | dict]): Testing results of the\n dataset.\n result_files (dict[str, str]): a dict contains json file path.\n coco_gt (COCO): COCO API object with ground truth annotation.\n metric (str | list[str]): Metrics to be evaluated. Options are\n 'bbox', 'segm', 'proposal', 'proposal_fast'.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float], optional): IoU threshold used for\n evaluating recalls/mAPs. If set to a list, the average of all\n IoUs will also be computed. If not specified, [0.50, 0.55,\n 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n Default: None.\n metric_items (list[str] | str, optional): Metric items that will\n be returned. 
If not specified, ``['AR@100', 'AR@300',\n 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be\n used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',\n 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when\n ``metric=='bbox' or metric=='segm'``.\n\n Returns:\n dict[str, float]: COCO style evaluation metric.\n \"\"\"\n if iou_thrs is None:\n iou_thrs = np.linspace(\n .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n if metric_items is not None:\n if not isinstance(metric_items, list):\n metric_items = [metric_items]\n\n eval_results = OrderedDict()\n for metric in metrics:\n msg = f'Evaluating {metric}...'\n if logger is None:\n msg = '\\n' + msg\n print_log(msg, logger=logger)\n\n if metric == 'proposal_fast':\n if isinstance(results[0], tuple):\n raise KeyError('proposal_fast is not supported for '\n 'instance segmentation result.')\n ar = self.fast_eval_recall(\n results, proposal_nums, iou_thrs, logger='silent')\n log_msg = []\n for i, num in enumerate(proposal_nums):\n eval_results[f'AR@{num}'] = ar[i]\n log_msg.append(f'\\nAR@{num}\\t{ar[i]:.4f}')\n log_msg = ''.join(log_msg)\n print_log(log_msg, logger=logger)\n continue\n\n iou_type = 'bbox' if metric == 'proposal' else metric\n if metric not in result_files:\n raise KeyError(f'{metric} is not in results')\n try:\n predictions = mmcv.load(result_files[metric])\n if iou_type == 'segm':\n # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa\n # When evaluating mask AP, if the results contain bbox,\n # cocoapi will use the box area instead of the mask area\n # for calculating the instance area. Though the overall AP\n # is not affected, this leads to different\n # small/medium/large mask AP results.\n for x in predictions:\n x.pop('bbox')\n warnings.simplefilter('once')\n warnings.warn(\n 'The key \"bbox\" is deleted for more accurate mask AP '\n 'of small/medium/large instances since v2.12.0. 
This '\n 'does not change the overall mAP calculation.',\n UserWarning)\n coco_det = coco_gt.loadRes(predictions)\n except IndexError:\n print_log(\n 'The testing results of the whole dataset is empty.',\n logger=logger,\n level=logging.ERROR)\n break\n\n cocoEval = COCOeval(coco_gt, coco_det, iou_type)\n cocoEval.params.catIds = self.cat_ids\n cocoEval.params.imgIds = self.img_ids\n cocoEval.params.maxDets = list(proposal_nums)\n cocoEval.params.iouThrs = iou_thrs\n # mapping of cocoEval.stats\n coco_metric_names = {\n 'mAP': 0,\n 'mAP_50': 1,\n 'mAP_75': 2,\n 'mAP_s': 3,\n 'mAP_m': 4,\n 'mAP_l': 5,\n 'AR@100': 6,\n 'AR@300': 7,\n 'AR@1000': 8,\n 'AR_s@1000': 9,\n 'AR_m@1000': 10,\n 'AR_l@1000': 11\n }\n if metric_items is not None:\n for metric_item in metric_items:\n if metric_item not in coco_metric_names:\n raise KeyError(\n f'metric item {metric_item} is not supported')\n\n if metric == 'proposal':\n cocoEval.params.useCats = 0\n cocoEval.evaluate()\n cocoEval.accumulate()\n\n # Save coco summarize print information to logger\n redirect_string = io.StringIO()\n with contextlib.redirect_stdout(redirect_string):\n cocoEval.summarize()\n print_log('\\n' + redirect_string.getvalue(), logger=logger)\n\n if metric_items is None:\n metric_items = [\n 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',\n 'AR_m@1000', 'AR_l@1000'\n ]\n\n for item in metric_items:\n val = float(\n f'{cocoEval.stats[coco_metric_names[item]]:.3f}')\n eval_results[item] = val\n else:\n cocoEval.evaluate()\n cocoEval.accumulate()\n\n # Save coco summarize print information to logger\n redirect_string = io.StringIO()\n with contextlib.redirect_stdout(redirect_string):\n cocoEval.summarize()\n print_log('\\n' + redirect_string.getvalue(), logger=logger)\n\n if classwise: # Compute per-category AP\n # Compute per-category AP\n # from https://github.com/facebookresearch/detectron2/\n precisions = cocoEval.eval['precision']\n # precision: (iou, recall, cls, area range, max dets)\n assert len(self.cat_ids) == precisions.shape[2]\n\n results_per_category = []\n for idx, catId in enumerate(self.cat_ids):\n # area range index 0: all area ranges\n # max dets index -1: typically 100 per image\n nm = self.coco.loadCats(catId)[0]\n precision = precisions[:, :, idx, 0, -1]\n precision = precision[precision > -1]\n if precision.size:\n ap = np.mean(precision)\n else:\n ap = float('nan')\n results_per_category.append(\n (f'{nm[\"name\"]}', f'{float(ap):0.3f}'))\n\n num_columns = min(6, len(results_per_category) * 2)\n results_flatten = list(\n itertools.chain(*results_per_category))\n headers = ['category', 'AP'] * (num_columns // 2)\n results_2d = itertools.zip_longest(*[\n results_flatten[i::num_columns]\n for i in range(num_columns)\n ])\n table_data = [headers]\n table_data += [result for result in results_2d]\n table = AsciiTable(table_data)\n print_log('\\n' + table.table, logger=logger)\n\n if metric_items is None:\n metric_items = [\n 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'\n ]\n\n for metric_item in metric_items:\n key = f'{metric}_{metric_item}'\n val = float(\n f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'\n )\n eval_results[key] = val\n ap = cocoEval.stats[:6]\n eval_results[f'{metric}_mAP_copypaste'] = (\n f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '\n f'{ap[4]:.3f} {ap[5]:.3f}')\n\n return eval_results\n\n def evaluate(self,\n results,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n classwise=False,\n proposal_nums=(100, 300, 1000),\n iou_thrs=None,\n metric_items=None):\n 
\"\"\"Evaluation in COCO protocol.\n\n Args:\n results (list[list | tuple]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. Options are\n 'bbox', 'segm', 'proposal', 'proposal_fast'.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float], optional): IoU threshold used for\n evaluating recalls/mAPs. If set to a list, the average of all\n IoUs will also be computed. If not specified, [0.50, 0.55,\n 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n Default: None.\n metric_items (list[str] | str, optional): Metric items that will\n be returned. If not specified, ``['AR@100', 'AR@300',\n 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be\n used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',\n 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when\n ``metric=='bbox' or metric=='segm'``.\n\n Returns:\n dict[str, float]: COCO style evaluation metric.\n \"\"\"\n\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n coco_gt = self.coco\n self.cat_ids = coco_gt.get_cat_ids(cat_names=self.CLASSES)\n\n result_files, tmp_dir = self.format_results(results, jsonfile_prefix)\n eval_results = self.evaluate_det_segm(results, result_files, coco_gt,\n metrics, logger, classwise,\n proposal_nums, iou_thrs,\n metric_items)\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n return eval_results" } ]
import itertools import os import mmcv import numpy as np import panopticapi from collections import defaultdict from mmcv.utils import print_log from terminaltables import AsciiTable from mmdet.core import INSTANCE_OFFSET from .api_wrappers import COCO, pq_compute_multi_core from .builder import DATASETS from .coco import CocoDataset from panopticapi.evaluation import VOID from panopticapi.utils import id2rgb
11507
pred_annotations = [] outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') for idx in range(len(self)): img_id = self.img_ids[idx] segm_file = self.data_infos[idx]['segm_file'] pan = results[idx] pan_labels = np.unique(pan) segm_info = [] for pan_label in pan_labels: sem_label = pan_label % INSTANCE_OFFSET # We reserve the length of self.CLASSES for VOID label if sem_label == len(self.CLASSES): continue # convert sem_label to json label cat_id = label2cat[sem_label] is_thing = self.categories[cat_id]['isthing'] mask = pan == pan_label area = mask.sum() segm_info.append({ 'id': int(pan_label), 'category_id': cat_id, 'isthing': is_thing, 'area': int(area) }) # evaluation script uses 0 for VOID label. pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID pan = id2rgb(pan).astype(np.uint8) mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file)) record = { 'image_id': img_id, 'segments_info': segm_info, 'file_name': segm_file } pred_annotations.append(record) pan_json_results = dict(annotations=pred_annotations) return pan_json_results def results2json(self, results, outfile_prefix): """Dump the results to a COCO style json file. There are 4 types of results: proposals, bbox predictions, mask predictions, panoptic segmentation predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. .. code-block:: none [ { 'pan_results': np.array, # shape (h, w) # ins_results which includes bboxes and RLE encoded masks # is optional. 'ins_results': (list[np.array], list[list[str]]) }, ... ] Args: results (list[dict]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.panoptic.json", "somepath/xxx.bbox.json", "somepath/xxx.segm.json" Returns: dict[str: str]: Possible keys are "panoptic", "bbox", "segm", \ "proposal", and values are corresponding filenames. 
""" result_files = dict() # panoptic segmentation results if 'pan_results' in results[0]: pan_results = [result['pan_results'] for result in results] pan_json_results = self._pan2json(pan_results, outfile_prefix) result_files['panoptic'] = f'{outfile_prefix}.panoptic.json' mmcv.dump(pan_json_results, result_files['panoptic']) # instance segmentation results if 'ins_results' in results[0]: ins_results = [result['ins_results'] for result in results] bbox_json_results, segm_json_results = self._segm2json(ins_results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(bbox_json_results, result_files['bbox']) mmcv.dump(segm_json_results, result_files['segm']) return result_files def evaluate_pan_json(self, result_files, outfile_prefix, logger=None, classwise=False, nproc=32): """Evaluate PQ according to the panoptic results json file.""" imgs = self.coco.imgs gt_json = self.coco.img_ann_map # image to annotations gt_json = [{ 'image_id': k, 'segments_info': v, 'file_name': imgs[k]['segm_file'] } for k, v in gt_json.items()] pred_json = mmcv.load(result_files['panoptic']) pred_json = dict( (el['image_id'], el) for el in pred_json['annotations']) # match the gt_anns and pred_anns in the same image matched_annotations_list = [] for gt_ann in gt_json: img_id = gt_ann['image_id'] if img_id not in pred_json.keys(): raise Exception('no prediction for the image' ' with id: {}'.format(img_id)) matched_annotations_list.append((gt_ann, pred_json[img_id])) gt_folder = self.seg_prefix pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
# Copyright (c) OpenMMLab. All rights reserved. # This file has been modified. try: except ImportError: panopticapi = None id2rgb = None VOID = None __all__ = ['CocoPanopticDataset'] class COCOPanoptic(COCO): """This wrapper is for loading the panoptic style annotation file. The format is shown in the CocoPanopticDataset class. Args: annotation_file (str): Path of annotation file. """ def __init__(self, annotation_file=None): if panopticapi is None: raise RuntimeError( 'panopticapi is not installed, please install it by: ' 'pip install git+https://github.com/cocodataset/' 'panopticapi.git.') super(COCOPanoptic, self).__init__(annotation_file) # load_ext for compatibility of nv pycocotools def createIndex(self, load_ext=False): assert not load_ext # create index print('creating index...') # anns stores 'segment_id -> annotation' anns, cats, imgs = {}, {}, {} img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list) if 'annotations' in self.dataset: for ann, img_info in zip(self.dataset['annotations'], self.dataset['images']): img_info['segm_file'] = ann['file_name'] for seg_ann in ann['segments_info']: # to match with instance.json seg_ann['image_id'] = ann['image_id'] seg_ann['height'] = img_info['height'] seg_ann['width'] = img_info['width'] img_to_anns[ann['image_id']].append(seg_ann) # segment_id is not unique in coco dataset orz... if seg_ann['id'] in anns.keys(): anns[seg_ann['id']].append(seg_ann) else: anns[seg_ann['id']] = [seg_ann] if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat if 'annotations' in self.dataset and 'categories' in self.dataset: for ann in self.dataset['annotations']: for seg_ann in ann['segments_info']: cat_to_imgs[seg_ann['category_id']].append(ann['image_id']) print('index created!') self.anns = anns self.imgToAnns = img_to_anns self.catToImgs = cat_to_imgs self.imgs = imgs self.cats = cats def load_anns(self, ids=[]): """Load anns with the specified ids. self.anns is a list of annotation lists instead of a list of annotations. Args: ids (int array): integer ids specifying anns Returns: anns (object array): loaded ann objects """ anns = [] if hasattr(ids, '__iter__') and hasattr(ids, '__len__'): # self.anns is a list of annotation lists instead of # a list of annotations for id in ids: anns += self.anns[id] return anns elif type(ids) == int: return self.anns[ids] @DATASETS.register_module() class CocoPanopticDataset(CocoDataset): """Coco dataset for Panoptic segmentation. The annotation format is shown as follows. The `ann` field is optional for testing. .. code-block:: none [ { 'filename': f'{image_id:012}.png', 'image_id':9 'segments_info': { [ { 'id': 8345037, (segment_id in panoptic png, convert from rgb) 'category_id': 51, 'iscrowd': 0, 'bbox': (x1, y1, w, h), 'area': 24315, 'segmentation': list,(coded mask) }, ... } } }, ... ] Args: ann_file (str): Panoptic segmentation annotation file path. pipeline (list[dict]): Processing pipeline. ins_ann_file (str): Instance segmentation annotation file path. Defaults to None. classes (str | Sequence[str], optional): Specify classes to load. If is None, ``cls.CLASSES`` will be used. Defaults to None. data_root (str, optional): Data root for ``ann_file``, ``ins_ann_file`` ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. Defaults to None. img_prefix (str, optional): Prefix of path to images. Defaults to ''. 
seg_prefix (str, optional): Prefix of path to segmentation files. Defaults to None. proposal_file (str, optional): Path to proposal file. Defaults to None. test_mode (bool, optional): If set True, annotation will not be loaded. Defaults to False. filter_empty_gt (bool, optional): If set true, images without bounding boxes of the dataset's classes will be filtered out. This option only works when `test_mode=False`, i.e., we never filter images during tests. Defaults to True. file_client_args (:obj:`mmcv.ConfigDict` | dict): file client args. Defaults to dict(backend='disk'). """ CLASSES = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', ' truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other', 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', 'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged', 'food-other-merged', 'building-other-merged', 'rock-merged', 'wall-other-merged', 'rug-merged' ] THING_CLASSES = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' ] STUFF_CLASSES = [ 'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other', 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', 'cabinet-merged', 
'table-merged', 'floor-other-merged', 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged', 'food-other-merged', 'building-other-merged', 'rock-merged', 'wall-other-merged', 'rug-merged' ] PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), (134, 134, 103), (145, 148, 174), (255, 208, 186), (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122), (191, 162, 208), (255, 255, 128), (147, 211, 203), (150, 100, 100), (168, 171, 172), (146, 112, 198), (210, 170, 100), (92, 136, 89), (218, 88, 184), (241, 129, 0), (217, 17, 255), (124, 74, 181), (70, 70, 70), (255, 228, 255), (154, 208, 0), (193, 0, 92), (76, 91, 113), (255, 180, 195), (106, 154, 176), (230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55), (254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255), (104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74), (135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149), (183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153), (146, 139, 141), (70, 130, 180), (134, 199, 156), (209, 226, 140), (96, 36, 108), (96, 96, 96), (64, 170, 64), (152, 251, 152), (208, 229, 228), (206, 186, 171), (152, 161, 64), (116, 112, 0), (0, 114, 143), (102, 102, 156), (250, 141, 255)] def __init__(self, ann_file, pipeline, ins_ann_file=None, classes=None, data_root=None, img_prefix='', seg_prefix=None, proposal_file=None, test_mode=False, filter_empty_gt=True, file_client_args=dict(backend='disk')): super().__init__( ann_file, pipeline, classes=classes, data_root=data_root, img_prefix=img_prefix, seg_prefix=seg_prefix, proposal_file=proposal_file, test_mode=test_mode, filter_empty_gt=filter_empty_gt, file_client_args=file_client_args) self.ins_ann_file = ins_ann_file def load_annotations(self, ann_file): """Load annotation from COCO Panoptic style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api. """ self.coco = COCOPanoptic(ann_file) self.cat_ids = self.coco.get_cat_ids() self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.categories = self.coco.cats self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] info['segm_file'] = info['filename'].replace('jpg', 'png') data_infos.append(info) return data_infos def get_ann_info(self, idx): """Get COCO annotation by index. Args: idx (int): Index of data. 
Returns: dict: Annotation info of specified index. """ img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) # filter out unmatched images ann_info = [i for i in ann_info if i['image_id'] == img_id] return self._parse_ann_info(self.data_infos[idx], ann_info) def _parse_ann_info(self, img_info, ann_info): """Parse annotations and load panoptic ground truths. Args: img_info (int): Image info of an image. ann_info (list[dict]): Annotation info of an image. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. """ gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_mask_infos = [] for i, ann in enumerate(ann_info): x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or w < 1 or h < 1: continue bbox = [x1, y1, x1 + w, y1 + h] category_id = ann['category_id'] contiguous_cat_id = self.cat2label[category_id] is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] if is_thing: is_crowd = ann.get('iscrowd', False) if not is_crowd: gt_bboxes.append(bbox) gt_labels.append(contiguous_cat_id) else: gt_bboxes_ignore.append(bbox) is_thing = False mask_info = { 'id': ann['id'], 'category': contiguous_cat_id, 'is_thing': is_thing } gt_mask_infos.append(mask_info) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) ann = dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_mask_infos, seg_map=img_info['segm_file']) return ann def _filter_imgs(self, min_size=32): """Filter images too small or without ground truths.""" ids_with_ann = [] # check whether images have legal thing annotations. for lists in self.coco.anns.values(): for item in lists: category_id = item['category_id'] is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] if not is_thing: continue ids_with_ann.append(item['image_id']) ids_with_ann = set(ids_with_ann) valid_inds = [] valid_img_ids = [] for i, img_info in enumerate(self.data_infos): img_id = self.img_ids[i] if self.filter_empty_gt and img_id not in ids_with_ann: continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) valid_img_ids.append(img_id) self.img_ids = valid_img_ids return valid_inds def _pan2json(self, results, outfile_prefix): """Convert panoptic results to COCO panoptic json style.""" label2cat = dict((v, k) for (k, v) in self.cat2label.items()) pred_annotations = [] outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') for idx in range(len(self)): img_id = self.img_ids[idx] segm_file = self.data_infos[idx]['segm_file'] pan = results[idx] pan_labels = np.unique(pan) segm_info = [] for pan_label in pan_labels: sem_label = pan_label % INSTANCE_OFFSET # We reserve the length of self.CLASSES for VOID label if sem_label == len(self.CLASSES): continue # convert sem_label to json label cat_id = label2cat[sem_label] is_thing = self.categories[cat_id]['isthing'] mask = pan == pan_label area = mask.sum() segm_info.append({ 'id': int(pan_label), 'category_id': cat_id, 'isthing': is_thing, 'area': int(area) }) # evaluation script uses 0 for VOID label. 
pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID pan = id2rgb(pan).astype(np.uint8) mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file)) record = { 'image_id': img_id, 'segments_info': segm_info, 'file_name': segm_file } pred_annotations.append(record) pan_json_results = dict(annotations=pred_annotations) return pan_json_results def results2json(self, results, outfile_prefix): """Dump the results to a COCO style json file. There are 4 types of results: proposals, bbox predictions, mask predictions, panoptic segmentation predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. .. code-block:: none [ { 'pan_results': np.array, # shape (h, w) # ins_results which includes bboxes and RLE encoded masks # is optional. 'ins_results': (list[np.array], list[list[str]]) }, ... ] Args: results (list[dict]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.panoptic.json", "somepath/xxx.bbox.json", "somepath/xxx.segm.json" Returns: dict[str: str]: Possible keys are "panoptic", "bbox", "segm", \ "proposal", and values are corresponding filenames. """ result_files = dict() # panoptic segmentation results if 'pan_results' in results[0]: pan_results = [result['pan_results'] for result in results] pan_json_results = self._pan2json(pan_results, outfile_prefix) result_files['panoptic'] = f'{outfile_prefix}.panoptic.json' mmcv.dump(pan_json_results, result_files['panoptic']) # instance segmentation results if 'ins_results' in results[0]: ins_results = [result['ins_results'] for result in results] bbox_json_results, segm_json_results = self._segm2json(ins_results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(bbox_json_results, result_files['bbox']) mmcv.dump(segm_json_results, result_files['segm']) return result_files def evaluate_pan_json(self, result_files, outfile_prefix, logger=None, classwise=False, nproc=32): """Evaluate PQ according to the panoptic results json file.""" imgs = self.coco.imgs gt_json = self.coco.img_ann_map # image to annotations gt_json = [{ 'image_id': k, 'segments_info': v, 'file_name': imgs[k]['segm_file'] } for k, v in gt_json.items()] pred_json = mmcv.load(result_files['panoptic']) pred_json = dict( (el['image_id'], el) for el in pred_json['annotations']) # match the gt_anns and pred_anns in the same image matched_annotations_list = [] for gt_ann in gt_json: img_id = gt_ann['image_id'] if img_id not in pred_json.keys(): raise Exception('no prediction for the image' ' with id: {}'.format(img_id)) matched_annotations_list.append((gt_ann, pred_json[img_id])) gt_folder = self.seg_prefix pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
pq_stat = pq_compute_multi_core(
1
2023-12-01 20:08:54+00:00
16k
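The record above bundles everything needed for a retrieval-augmented next-line completion task: a context list of snippets from other files in the repo, the in-file import_statement and cropped_code, the target next_line, and gold_snippet_index, which by its name is an index into that context list. Below is a minimal, purely illustrative Python sketch of how such a record could be assembled into a prompt and scored; it assumes the record is already loaded as a dict with exactly the field names shown above and is not an official loader or evaluation script.

def build_prompt(record):
    """Assemble a completion prompt from one record (illustrative only)."""
    parts = []
    context = record.get("context") or []
    idx = record.get("gold_snippet_index")
    # Prepend the gold cross-file snippet when the index is valid.
    if isinstance(idx, int) and 0 <= idx < len(context):
        gold = context[idx]
        parts.append(f"# Retrieved from {gold['path']} ({gold['identifier']})")
        parts.append(gold["snippet"])
    # In-file content: the import block, then the code immediately
    # preceding the line to be predicted.
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n".join(parts)

def exact_match(prediction, record):
    """Judge a single-line prediction against the stored next_line."""
    return prediction.strip() == record["next_line"].strip()

For the record above, exact_match would expect the model to produce the line pq_stat = pq_compute_multi_core(.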
IanYeung/MGLD-VSR
basicsr/data/realbasicvsr_dataset.py
[ { "identifier": "Clip", "path": "basicsr/data/mmcv_transforms/aug_pix.py", "snippet": "class Clip(BaseTransform):\n \"\"\"Clip the pixels.\n\n Modified keys are the attributes specified in \"keys\".\n\n Args:\n keys (list[str]): The keys whose values are clipped.\n a_min (int): Lower limits of pixel value.\n a_max (int): Upper limits of pixel value.\n \"\"\"\n\n def __init__(self, keys, a_min=0, a_max=255):\n\n self.keys = keys\n self.a_min = a_min\n self.a_max = a_max\n\n def _clip(self, input_):\n \"\"\"Clip the pixels.\n\n Args:\n input_ (Union[List, np.ndarray]): Pixels to clip.\n\n Returns:\n Union[List, np.ndarray]: Clipped pixels.\n \"\"\"\n is_single_image = False\n if isinstance(input_, np.ndarray):\n is_single_image = True\n input_ = [input_]\n\n # clip\n input_ = [np.clip(v, self.a_min, self.a_max) for v in input_]\n\n if is_single_image:\n input_ = input_[0]\n\n return input_\n\n def transform(self, results):\n \"\"\"transform function.\n\n Args:\n results (dict): A dict containing the necessary information and\n data for augmentation.\n\n Returns:\n dict: A dict with the values of the specified keys are rounded\n and clipped.\n \"\"\"\n\n for key in self.keys:\n results[key] = self._clip(results[key])\n\n return results\n\n def __repr__(self):\n\n result = self.__class__.__name__\n result += f'(a_min={self.a_min}, a_max={self.a_max})'\n\n return result" }, { "identifier": "UnsharpMasking", "path": "basicsr/data/mmcv_transforms/aug_pix.py", "snippet": "class UnsharpMasking(BaseTransform):\n \"\"\"Apply unsharp masking to an image or a sequence of images.\n\n Args:\n kernel_size (int): The kernel_size of the Gaussian kernel.\n sigma (float): The standard deviation of the Gaussian.\n weight (float): The weight of the \"details\" in the final output.\n threshold (float): Pixel differences larger than this value are\n regarded as \"details\".\n keys (list[str]): The keys whose values are processed.\n\n Added keys are \"xxx_unsharp\", where \"xxx\" are the attributes specified\n in \"keys\".\n \"\"\"\n\n def __init__(self, kernel_size, sigma, weight, threshold, keys):\n\n if kernel_size % 2 == 0:\n raise ValueError('kernel_size must be an odd number, but '\n f'got {kernel_size}.')\n\n self.kernel_size = kernel_size\n self.sigma = sigma\n self.weight = weight\n self.threshold = threshold\n self.keys = keys\n\n kernel = cv2.getGaussianKernel(kernel_size, sigma)\n self.kernel = np.matmul(kernel, kernel.transpose())\n\n def _unsharp_masking(self, imgs):\n \"\"\"Unsharp masking function.\"\"\"\n\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n outputs = []\n for img in imgs:\n img = img.astype(np.float32)\n residue = img - cv2.filter2D(img, -1, self.kernel)\n mask = np.float32(np.abs(residue) > self.threshold)\n soft_mask = cv2.filter2D(mask, -1, self.kernel)\n sharpened = np.clip(img + self.weight * residue, 0, 255)\n\n outputs.append(soft_mask * sharpened + (1 - soft_mask) * img)\n\n if is_single_image:\n outputs = outputs[0]\n\n return outputs\n\n def transform(self, results):\n \"\"\"transform function.\n\n Args:\n results (dict): A dict containing the necessary information and\n data for augmentation.\n\n Returns:\n dict: A dict containing the processed data and information.\n \"\"\"\n for key in self.keys:\n # results[f'{key}_unsharp'] = self._unsharp_masking(results[key])\n results[key] = self._unsharp_masking(results[key])\n\n return results\n\n def __repr__(self):\n\n repr_str = self.__class__.__name__\n repr_str += 
(f'(keys={self.keys}, kernel_size={self.kernel_size}, '\n f'sigma={self.sigma}, weight={self.weight}, '\n f'threshold={self.threshold})')\n\n return repr_str" }, { "identifier": "RescaleToZeroOne", "path": "basicsr/data/mmcv_transforms/normalization.py", "snippet": "class RescaleToZeroOne(BaseTransform):\n \"\"\"Transform the images into a range between 0 and 1.\n\n Required keys are the keys in attribute \"keys\", added or modified keys are\n the keys in attribute \"keys\".\n It also supports rescaling a list of images.\n\n Args:\n keys (Sequence[str]): The images to be transformed.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def transform(self, results):\n \"\"\"transform function.\n\n Args:\n results (dict): A dict containing the necessary information and\n data for augmentation.\n\n Returns:\n dict: A dict containing the processed data and information.\n \"\"\"\n for key in self.keys:\n if isinstance(results[key], list):\n results[key] = [\n v.astype(np.float32) / 255. for v in results[key]\n ]\n else:\n results[key] = results[key].astype(np.float32) / 255.\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'" }, { "identifier": "RandomBlur", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomBlur:\n \"\"\"Apply random blur to the input.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n self.keys = keys\n self.params = params\n\n def get_kernel(self, num_kernels: int):\n \"\"\"This is the function to create kernel.\n\n Args:\n num_kernels (int): the number of kernels\n\n Returns:\n _type_: _description_\n \"\"\"\n kernel_type = np.random.choice(\n self.params['kernel_list'], p=self.params['kernel_prob'])\n kernel_size = random.choice(self.params['kernel_size'])\n\n sigma_x_range = self.params.get('sigma_x', [0, 0])\n sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])\n sigma_x_step = self.params.get('sigma_x_step', 0)\n\n sigma_y_range = self.params.get('sigma_y', [0, 0])\n sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])\n sigma_y_step = self.params.get('sigma_y_step', 0)\n\n rotate_angle_range = self.params.get('rotate_angle', [-np.pi, np.pi])\n rotate_angle = np.random.uniform(rotate_angle_range[0],\n rotate_angle_range[1])\n rotate_angle_step = self.params.get('rotate_angle_step', 0)\n\n beta_gau_range = self.params.get('beta_gaussian', [0.5, 4])\n beta_gau = np.random.uniform(beta_gau_range[0], beta_gau_range[1])\n beta_gau_step = self.params.get('beta_gaussian_step', 0)\n\n beta_pla_range = self.params.get('beta_plateau', [1, 2])\n beta_pla = np.random.uniform(beta_pla_range[0], beta_pla_range[1])\n beta_pla_step = self.params.get('beta_plateau_step', 0)\n\n omega_range = self.params.get('omega', None)\n omega_step = self.params.get('omega_step', 0)\n if omega_range is None: # follow Real-ESRGAN settings if not specified\n if kernel_size < 13:\n omega_range = [np.pi / 3., np.pi]\n else:\n omega_range = [np.pi / 5., np.pi]\n omega = np.random.uniform(omega_range[0], omega_range[1])\n\n # determine blurring kernel\n kernels = []\n for _ in range(0, num_kernels):\n kernel = random_mixed_kernels(\n [kernel_type],\n [1],\n kernel_size,\n [sigma_x, sigma_x],\n [sigma_y, sigma_y],\n [rotate_angle, rotate_angle],\n [beta_gau, beta_gau],\n [beta_pla, 
beta_pla],\n [omega, omega],\n None,\n )\n kernels.append(kernel)\n\n # update kernel parameters\n sigma_x += np.random.uniform(-sigma_x_step, sigma_x_step)\n sigma_y += np.random.uniform(-sigma_y_step, sigma_y_step)\n rotate_angle += np.random.uniform(-rotate_angle_step,\n rotate_angle_step)\n beta_gau += np.random.uniform(-beta_gau_step, beta_gau_step)\n beta_pla += np.random.uniform(-beta_pla_step, beta_pla_step)\n omega += np.random.uniform(-omega_step, omega_step)\n\n sigma_x = np.clip(sigma_x, sigma_x_range[0], sigma_x_range[1])\n sigma_y = np.clip(sigma_y, sigma_y_range[0], sigma_y_range[1])\n rotate_angle = np.clip(rotate_angle, rotate_angle_range[0],\n rotate_angle_range[1])\n beta_gau = np.clip(beta_gau, beta_gau_range[0], beta_gau_range[1])\n beta_pla = np.clip(beta_pla, beta_pla_range[0], beta_pla_range[1])\n omega = np.clip(omega, omega_range[0], omega_range[1])\n\n return kernels\n\n def _apply_random_blur(self, imgs):\n \"\"\"This is the function to apply blur operation on images.\n\n Args:\n imgs (Tensor): images\n\n Returns:\n Tensor: Images applied blur\n \"\"\"\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n # get kernel and blur the input\n kernels = self.get_kernel(num_kernels=len(imgs))\n imgs = [\n cv2.filter2D(img, -1, kernel)\n for img, kernel in zip(imgs, kernels)\n ]\n\n if is_single_image:\n imgs = imgs[0]\n\n return imgs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_blur(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomResize", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomResize:\n \"\"\"Randomly resize the input.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n self.keys = keys\n self.params = params\n\n self.resize_dict = dict(\n bilinear=cv2.INTER_LINEAR,\n bicubic=cv2.INTER_CUBIC,\n area=cv2.INTER_AREA,\n lanczos=cv2.INTER_LANCZOS4)\n\n def _random_resize(self, imgs):\n \"\"\"This is the function used to randomly resize images for training\n augmentation.\n\n Args:\n imgs (Tensor): training images.\n\n Returns:\n Tensor: images after randomly resized\n \"\"\"\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n h, w = imgs[0].shape[:2]\n\n resize_opt = self.params['resize_opt']\n resize_prob = self.params['resize_prob']\n resize_opt = np.random.choice(resize_opt, p=resize_prob).lower()\n if resize_opt not in self.resize_dict:\n raise NotImplementedError(f'resize_opt [{resize_opt}] is not '\n 'implemented')\n resize_opt = self.resize_dict[resize_opt]\n\n resize_step = self.params.get('resize_step', 0)\n\n # determine the target size, if not provided\n target_size = self.params.get('target_size', None)\n if target_size is None:\n resize_mode = np.random.choice(['up', 'down', 'keep'],\n p=self.params['resize_mode_prob'])\n resize_scale = self.params['resize_scale']\n if resize_mode == 'up':\n scale_factor = np.random.uniform(1, resize_scale[1])\n elif resize_mode == 'down':\n scale_factor = np.random.uniform(resize_scale[0], 1)\n 
else:\n scale_factor = 1\n\n # determine output size\n h_out, w_out = h * scale_factor, w * scale_factor\n if self.params.get('is_size_even', False):\n h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)\n target_size = (int(h_out), int(w_out))\n else:\n resize_step = 0\n\n # resize the input\n if resize_step == 0: # same target_size for all input images\n outputs = [\n cv2.resize(img, target_size[::-1], interpolation=resize_opt)\n for img in imgs\n ]\n else: # different target_size for each input image\n outputs = []\n for img in imgs:\n img = cv2.resize(\n img, target_size[::-1], interpolation=resize_opt)\n outputs.append(img)\n\n # update scale\n scale_factor += np.random.uniform(-resize_step, resize_step)\n scale_factor = np.clip(scale_factor, resize_scale[0],\n resize_scale[1])\n\n # determine output size\n h_out, w_out = h * scale_factor, w * scale_factor\n if self.params.get('is_size_even', False):\n h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)\n target_size = (int(h_out), int(w_out))\n\n if is_single_image:\n outputs = outputs[0]\n\n return outputs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._random_resize(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomNoise", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomNoise:\n \"\"\"Apply random noise to the input.\n\n Currently support Gaussian noise and Poisson noise.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n self.keys = keys\n self.params = params\n\n def _apply_gaussian_noise(self, imgs):\n \"\"\"This is the function used to apply gaussian noise on images.\n\n Args:\n imgs (Tensor): images\n\n Returns:\n Tensor: images applied gaussian noise\n \"\"\"\n sigma_range = self.params['gaussian_sigma']\n sigma = np.random.uniform(sigma_range[0], sigma_range[1])\n\n sigma_step = self.params.get('gaussian_sigma_step', 0)\n\n gray_noise_prob = self.params['gaussian_gray_noise_prob']\n is_gray_noise = np.random.uniform() < gray_noise_prob\n\n outputs = []\n for img in imgs:\n noise = np.float32(np.random.randn(*(img.shape))) * sigma\n if is_gray_noise:\n noise = noise[:, :, :1]\n outputs.append(img + noise)\n\n # update noise level\n sigma += np.random.uniform(-sigma_step, sigma_step)\n sigma = np.clip(sigma, sigma_range[0], sigma_range[1])\n\n return outputs\n\n def _apply_poisson_noise(self, imgs):\n scale_range = self.params['poisson_scale']\n scale = np.random.uniform(scale_range[0], scale_range[1])\n\n scale_step = self.params.get('poisson_scale_step', 0)\n\n gray_noise_prob = self.params['poisson_gray_noise_prob']\n is_gray_noise = np.random.uniform() < gray_noise_prob\n\n outputs = []\n for img in imgs:\n noise = np.float32(img.copy())\n if is_gray_noise:\n noise = cv2.cvtColor(noise[..., [2, 1, 0]], cv2.COLOR_BGR2GRAY)\n noise = noise[..., np.newaxis]\n noise = np.clip((noise).round(), 0, 255)\n unique_val = 2**np.ceil(np.log2(len(np.unique(noise))))\n noise = np.random.poisson(noise * unique_val).astype(np.float32) \\\n / unique_val - noise\n\n outputs.append(img + noise * scale)\n\n # update noise level\n scale += 
np.random.uniform(-scale_step, scale_step)\n scale = np.clip(scale, scale_range[0], scale_range[1])\n\n return outputs\n\n def _apply_random_noise(self, imgs):\n \"\"\"This is the function used to apply random noise on images.\n\n Args:\n imgs (Tensor): training images\n\n Returns:\n _type_: _description_\n \"\"\"\n noise_type = np.random.choice(\n self.params['noise_type'], p=self.params['noise_prob'])\n\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n if noise_type.lower() == 'gaussian':\n imgs = self._apply_gaussian_noise(imgs)\n elif noise_type.lower() == 'poisson':\n imgs = self._apply_poisson_noise(imgs)\n else:\n raise NotImplementedError(f'\"noise_type\" [{noise_type}] is '\n 'not implemented.')\n\n if is_single_image:\n imgs = imgs[0]\n\n return imgs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_noise(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomJPEGCompression", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomJPEGCompression:\n \"\"\"Apply random JPEG compression to the input.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n bgr2rgb (str): Whether change channel order. Default: False.\n \"\"\"\n\n def __init__(self, params, keys, color_type='color', bgr2rgb=False):\n self.keys = keys\n self.params = params\n self.color_type = color_type\n self.bgr2rgb = bgr2rgb\n\n def _apply_random_compression(self, imgs):\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n # determine initial compression level and the step size\n quality = self.params['quality']\n quality_step = self.params.get('quality_step', 0)\n jpeg_param = round(np.random.uniform(quality[0], quality[1]))\n\n # apply jpeg compression\n outputs = []\n for img in imgs:\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_param]\n if self.bgr2rgb and self.color_type == 'color':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n _, img_encoded = cv2.imencode('.jpg', img, encode_param)\n\n if self.color_type == 'color':\n img_encoded = cv2.imdecode(img_encoded, 1)\n if self.bgr2rgb:\n img_encoded = cv2.cvtColor(img_encoded, cv2.COLOR_BGR2RGB)\n outputs.append(img_encoded)\n else:\n outputs.append(cv2.imdecode(img_encoded, 0))\n\n # update compression level\n jpeg_param += np.random.uniform(-quality_step, quality_step)\n jpeg_param = round(np.clip(jpeg_param, quality[0], quality[1]))\n\n if is_single_image:\n outputs = outputs[0]\n\n return outputs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_compression(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomVideoCompression", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomVideoCompression:\n \"\"\"Apply random video compression to the input.\n\n Modified keys are the attributed specified in 
\"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n assert has_av, 'Please install av to use video compression.'\n\n self.keys = keys\n self.params = params\n logging.getLogger('libav').setLevel(50)\n\n def _apply_random_compression(self, imgs):\n \"\"\"This is the function to apply random compression on images.\n\n Args:\n imgs (Tensor): training images\n\n Returns:\n Tensor: images after randomly compressed\n \"\"\"\n codec = random.choices(self.params['codec'],\n self.params['codec_prob'])[0]\n bitrate = self.params['bitrate']\n bitrate = np.random.randint(bitrate[0], bitrate[1] + 1)\n\n buf = io.BytesIO()\n with av.open(buf, 'w', 'mp4') as container:\n stream = container.add_stream(codec, rate=1)\n stream.height = imgs[0].shape[0]\n stream.width = imgs[0].shape[1]\n stream.pix_fmt = 'yuv420p'\n stream.bit_rate = bitrate\n\n for img in imgs:\n img = img.astype(np.uint8)\n frame = av.VideoFrame.from_ndarray(img, format='rgb24')\n frame.pict_type = 'NONE'\n for packet in stream.encode(frame):\n container.mux(packet)\n\n # Flush stream\n for packet in stream.encode():\n container.mux(packet)\n\n outputs = []\n with av.open(buf, 'r', 'mp4') as container:\n if container.streams.video:\n for frame in container.decode(**{'video': 0}):\n outputs.append(frame.to_rgb().to_ndarray().astype(\n np.float32))\n\n return outputs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_compression(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "DegradationsWithShuffle", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class DegradationsWithShuffle:\n \"\"\"Apply random degradations to input, with degradations being shuffled.\n\n Degradation groups are supported. The order of degradations within the same\n group is preserved. For example, if we have degradations = [a, b, [c, d]]\n and shuffle_idx = None, then the possible orders are\n\n ::\n\n [a, b, [c, d]]\n [a, [c, d], b]\n [b, a, [c, d]]\n [b, [c, d], a]\n [[c, d], a, b]\n [[c, d], b, a]\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n degradations (list[dict]): The list of degradations.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n shuffle_idx (list | None, optional): The degradations corresponding to\n these indices are shuffled. 
If None, all degradations are shuffled.\n Default: None.\n \"\"\"\n\n def __init__(self, degradations, keys, shuffle_idx=None):\n\n self.keys = keys\n\n self.degradations = self._build_degradations(degradations)\n\n if shuffle_idx is None:\n self.shuffle_idx = list(range(0, len(degradations)))\n else:\n self.shuffle_idx = shuffle_idx\n\n def _build_degradations(self, degradations):\n for i, degradation in enumerate(degradations):\n if isinstance(degradation, (list, tuple)):\n degradations[i] = self._build_degradations(degradation)\n else:\n degradation_ = allowed_degradations[degradation['type']]\n degradations[i] = degradation_(degradation['params'],\n self.keys)\n\n return degradations\n\n def __call__(self, results):\n # shuffle degradations\n if len(self.shuffle_idx) > 0:\n shuffle_list = [self.degradations[i] for i in self.shuffle_idx]\n np.random.shuffle(shuffle_list)\n for i, idx in enumerate(self.shuffle_idx):\n self.degradations[idx] = shuffle_list[i]\n\n # apply degradations to input\n for degradation in self.degradations:\n if isinstance(degradation, (tuple, list)):\n for subdegrdation in degradation:\n results = subdegrdation(results)\n else:\n results = degradation(results)\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(degradations={self.degradations}, '\n f'keys={self.keys}, '\n f'shuffle_idx={self.shuffle_idx})')\n return repr_str" }, { "identifier": "circular_lowpass_kernel", "path": "basicsr/data/degradations.py", "snippet": "def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):\n \"\"\"2D sinc filter\n\n Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter\n\n Args:\n cutoff (float): cutoff frequency in radians (pi is max)\n kernel_size (int): horizontal and vertical size, must be odd.\n pad_to (int): pad kernel size to desired size, must be odd or zero.\n \"\"\"\n assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'\n kernel = np.fromfunction(\n lambda x, y: cutoff * special.j1(cutoff * np.sqrt(\n (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(\n (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])\n kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)\n kernel = kernel / np.sum(kernel)\n if pad_to > kernel_size:\n pad_size = (pad_to - kernel_size) // 2\n kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))\n return kernel" }, { "identifier": "random_mixed_kernels", "path": "basicsr/data/degradations.py", "snippet": "def random_mixed_kernels(kernel_list,\n kernel_prob,\n kernel_size=21,\n sigma_x_range=(0.6, 5),\n sigma_y_range=(0.6, 5),\n rotation_range=(-math.pi, math.pi),\n betag_range=(0.5, 8),\n betap_range=(0.5, 8),\n noise_range=None,\n return_sigma=False):\n \"\"\"Randomly generate mixed kernels.\n\n Args:\n kernel_list (tuple): a list name of kernel types,\n support ['iso', 'aniso', 'skew', 'generalized', 'plateau_iso',\n 'plateau_aniso']\n kernel_prob (tuple): corresponding kernel probability for each\n kernel type\n kernel_size (int):\n sigma_x_range (tuple): [0.6, 5]\n sigma_y_range (tuple): [0.6, 5]\n rotation range (tuple): [-math.pi, math.pi]\n beta_range (tuple): [0.5, 8]\n noise_range(tuple, optional): multiplicative kernel noise,\n [0.75, 1.25]. 
Default: None\n\n Returns:\n kernel (ndarray):\n \"\"\"\n kernel_type = random.choices(kernel_list, kernel_prob)[0]\n if not return_sigma:\n if kernel_type == 'iso':\n kernel = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'aniso':\n kernel = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False, return_sigma=return_sigma)\n elif kernel_type == 'generalized_iso':\n kernel = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=True,\n return_sigma=return_sigma)\n elif kernel_type == 'generalized_aniso':\n kernel = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=False,\n return_sigma=return_sigma)\n elif kernel_type == 'plateau_iso':\n kernel = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'plateau_aniso':\n kernel = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False, return_sigma=return_sigma)\n return kernel\n else:\n if kernel_type == 'iso':\n kernel, sigma_list = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'aniso':\n kernel, sigma_list = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False, return_sigma=return_sigma)\n elif kernel_type == 'generalized_iso':\n kernel, sigma_list = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=True,\n return_sigma=return_sigma)\n elif kernel_type == 'generalized_aniso':\n kernel, sigma_list = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=False,\n return_sigma=return_sigma)\n elif kernel_type == 'plateau_iso':\n kernel, sigma_list = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'plateau_aniso':\n kernel, sigma_list = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False, return_sigma=return_sigma)\n return kernel, sigma_list" }, { "identifier": "augment", "path": "basicsr/data/transforms.py", "snippet": "def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):\n \"\"\"Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).\n\n We use vertical flip and transpose for rotation implementation.\n All the images in the list use the same augmentation.\n\n Args:\n imgs (list[ndarray] | ndarray): Images to be augmented. If the input\n is an ndarray, it will be transformed to a list.\n hflip (bool): Horizontal flip. Default: True.\n rotation (bool): Ratotation. Default: True.\n flows (list[ndarray]: Flows to be augmented. 
If the input is an\n ndarray, it will be transformed to a list.\n Dimension is (h, w, 2). Default: None.\n return_status (bool): Return the status of flip and rotation.\n Default: False.\n\n Returns:\n list[ndarray] | ndarray: Augmented images and flows. If returned\n results only have one element, just return ndarray.\n\n \"\"\"\n hflip = hflip and random.random() < 0.5\n vflip = rotation and random.random() < 0.5\n rot90 = rotation and random.random() < 0.5\n\n def _augment(img):\n if hflip: # horizontal\n cv2.flip(img, 1, img)\n if vflip: # vertical\n cv2.flip(img, 0, img)\n if rot90:\n img = img.transpose(1, 0, 2)\n return img\n\n def _augment_flow(flow):\n if hflip: # horizontal\n cv2.flip(flow, 1, flow)\n flow[:, :, 0] *= -1\n if vflip: # vertical\n cv2.flip(flow, 0, flow)\n flow[:, :, 1] *= -1\n if rot90:\n flow = flow.transpose(1, 0, 2)\n flow = flow[:, :, [1, 0]]\n return flow\n\n if not isinstance(imgs, list):\n imgs = [imgs]\n imgs = [_augment(img) for img in imgs]\n if len(imgs) == 1:\n imgs = imgs[0]\n\n if flows is not None:\n if not isinstance(flows, list):\n flows = [flows]\n flows = [_augment_flow(flow) for flow in flows]\n if len(flows) == 1:\n flows = flows[0]\n return imgs, flows\n else:\n if return_status:\n return imgs, (hflip, vflip, rot90)\n else:\n return imgs" }, { "identifier": "single_random_crop", "path": "basicsr/data/transforms.py", "snippet": "def single_random_crop(img_gts, gt_patch_size, gt_path=None):\n \"\"\"Random crop. Support Numpy array and Tensor inputs.\n\n It crops lists of lq and gt images with corresponding locations.\n\n Args:\n img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n gt_patch_size (int): GT patch size.\n gt_path (str): Path to ground-truth. Default: None.\n\n Returns:\n list[ndarray] | ndarray: GT images and LQ images. If returned results\n only have one element, just return ndarray.\n \"\"\"\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_gt, w_gt = img_gts[0].size()[-2:]\n else:\n h_gt, w_gt = img_gts[0].shape[0:2]\n\n # if h_gt != h_lq * scale or w_gt != w_lq * scale:\n # raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n # f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_gt < gt_patch_size or w_gt < gt_patch_size:\n raise ValueError(f'GT ({h_gt}, {w_gt}) is smaller than '\n f'patch size ({gt_patch_size}, {gt_patch_size}).')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_gt - gt_patch_size)\n left = random.randint(0, w_gt - gt_patch_size)\n\n # crop corresponding gt patch\n if input_type == 'Tensor':\n img_gts = [v[:, :, top:top + gt_patch_size, left:left + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top:top + gt_patch_size, left:left + gt_patch_size, ...] for v in img_gts]\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n\n return img_gts" }, { "identifier": "paired_random_crop", "path": "basicsr/data/transforms.py", "snippet": "def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None):\n \"\"\"Paired random crop. Support Numpy array and Tensor inputs.\n\n It crops lists of lq and gt images with corresponding locations.\n\n Args:\n img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. 
Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n img_lqs (list[ndarray] | ndarray): LQ images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n gt_patch_size (int): GT patch size.\n scale (int): Scale factor.\n gt_path (str): Path to ground-truth. Default: None.\n\n Returns:\n list[ndarray] | ndarray: GT images and LQ images. If returned results\n only have one element, just return ndarray.\n \"\"\"\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n if not isinstance(img_lqs, list):\n img_lqs = [img_lqs]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_lq, w_lq = img_lqs[0].size()[-2:]\n h_gt, w_gt = img_gts[0].size()[-2:]\n else:\n h_lq, w_lq = img_lqs[0].shape[0:2]\n h_gt, w_gt = img_gts[0].shape[0:2]\n lq_patch_size = gt_patch_size // scale\n\n if h_gt != h_lq * scale or w_gt != w_lq * scale:\n raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_lq < lq_patch_size or w_lq < lq_patch_size:\n raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '\n f'({lq_patch_size}, {lq_patch_size}). '\n f'Please remove {gt_path}.')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_lq - lq_patch_size)\n left = random.randint(0, w_lq - lq_patch_size)\n\n # crop lq patch\n if input_type == 'Tensor':\n img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]\n else:\n img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]\n\n # crop corresponding gt patch\n top_gt, left_gt = int(top * scale), int(left * scale)\n if input_type == 'Tensor':\n img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n if len(img_lqs) == 1:\n img_lqs = img_lqs[0]\n return img_gts, img_lqs" }, { "identifier": "FileClient", "path": "basicsr/utils/file_client.py", "snippet": "class FileClient(object):\n \"\"\"A general file client to access files in different backend.\n\n The client loads a file or text in a specified backend from its path\n and return it as a binary file. it can also register other backend\n accessor with a given name and backend class.\n\n Attributes:\n backend (str): The storage backend type. Options are \"disk\",\n \"memcached\" and \"lmdb\".\n client (:obj:`BaseStorageBackend`): The backend object.\n \"\"\"\n\n _backends = {\n 'disk': HardDiskBackend,\n 'memcached': MemcachedBackend,\n 'lmdb': LmdbBackend,\n }\n\n def __init__(self, backend='disk', **kwargs):\n if backend not in self._backends:\n raise ValueError(f'Backend {backend} is not supported. 
Currently supported ones'\n f' are {list(self._backends.keys())}')\n self.backend = backend\n self.client = self._backends[backend](**kwargs)\n\n def get(self, filepath, client_key='default'):\n # client_key is used only for lmdb, where different fileclients have\n # different lmdb environments.\n if self.backend == 'lmdb':\n return self.client.get(filepath, client_key)\n else:\n return self.client.get(filepath)\n\n def get_text(self, filepath):\n return self.client.get_text(filepath)" }, { "identifier": "imfrombytes", "path": "basicsr/utils/img_util.py", "snippet": "def imfrombytes(content, flag='color', float32=False):\n \"\"\"Read an image from bytes.\n\n Args:\n content (bytes): Image bytes got from files or other streams.\n flag (str): Flags specifying the color type of a loaded image,\n candidates are `color`, `grayscale` and `unchanged`.\n float32 (bool): Whether to change to float32., If True, will also norm\n to [0, 1]. Default: False.\n\n Returns:\n ndarray: Loaded image array.\n \"\"\"\n img_np = np.frombuffer(content, np.uint8)\n imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}\n img = cv2.imdecode(img_np, imread_flags[flag])\n if float32:\n img = img.astype(np.float32) / 255.\n return img" }, { "identifier": "img2tensor", "path": "basicsr/utils/img_util.py", "snippet": "def img2tensor(imgs, bgr2rgb=True, float32=True):\n \"\"\"Numpy array to tensor.\n\n Args:\n imgs (list[ndarray] | ndarray): Input images.\n bgr2rgb (bool): Whether to change bgr to rgb.\n float32 (bool): Whether to change to float32.\n\n Returns:\n list[tensor] | tensor: Tensor images. If returned results only have\n one element, just return tensor.\n \"\"\"\n\n def _totensor(img, bgr2rgb, float32):\n if img.shape[2] == 3 and bgr2rgb:\n if img.dtype == 'float64':\n img = img.astype('float32')\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = torch.from_numpy(img.transpose(2, 0, 1))\n if float32:\n img = img.float()\n return img\n\n if isinstance(imgs, list):\n return [_totensor(img, bgr2rgb, float32) for img in imgs]\n else:\n return _totensor(imgs, bgr2rgb, float32)" }, { "identifier": "imwrite", "path": "basicsr/utils/img_util.py", "snippet": "def imwrite(img, file_path, params=None, auto_mkdir=True):\n \"\"\"Write image to file.\n\n Args:\n img (ndarray): Image array to be written.\n file_path (str): Image file path.\n params (None or list): Same as opencv's :func:`imwrite` interface.\n auto_mkdir (bool): If the parent folder of `file_path` does not exist,\n whether to create it automatically.\n\n Returns:\n bool: Successful or not.\n \"\"\"\n if auto_mkdir:\n dir_name = os.path.abspath(os.path.dirname(file_path))\n os.makedirs(dir_name, exist_ok=True)\n ok = cv2.imwrite(file_path, img, params)\n if not ok:\n raise IOError('Failed in writing images.')" }, { "identifier": "tensor2img", "path": "basicsr/utils/img_util.py", "snippet": "def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):\n \"\"\"Convert torch Tensors into image numpy arrays.\n\n After clamping to [min, max], values will be normalized to [0, 1].\n\n Args:\n tensor (Tensor or list[Tensor]): Accept shapes:\n 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);\n 2) 3D Tensor of shape (3/1 x H x W);\n 3) 2D Tensor of shape (H x W).\n Tensor channel should be in RGB order.\n rgb2bgr (bool): Whether to change rgb to bgr.\n out_type (numpy type): output types. 
If ``np.uint8``, transform outputs\n to uint8 type with range [0, 255]; otherwise, float type with\n range [0, 1]. Default: ``np.uint8``.\n min_max (tuple[int]): min and max values for clamp.\n\n Returns:\n (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of\n shape (H x W). The channel order is BGR.\n \"\"\"\n if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')\n\n if torch.is_tensor(tensor):\n tensor = [tensor]\n result = []\n for _tensor in tensor:\n _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)\n _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])\n\n n_dim = _tensor.dim()\n if n_dim == 4:\n img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()\n img_np = img_np.transpose(1, 2, 0)\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 3:\n img_np = _tensor.numpy()\n img_np = img_np.transpose(1, 2, 0)\n if img_np.shape[2] == 1: # gray image\n img_np = np.squeeze(img_np, axis=2)\n else:\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 2:\n img_np = _tensor.numpy()\n else:\n raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')\n if out_type == np.uint8:\n # Unlike MATLAB, numpy.unit8() WILL NOT round by default.\n img_np = (img_np * 255.0).round()\n img_np = img_np.astype(out_type)\n result.append(img_np)\n # if len(result) == 1 and torch.is_tensor(tensor):\n if len(result) == 1:\n result = result[0]\n return result" }, { "identifier": "get_root_logger", "path": "basicsr/utils/logger.py", "snippet": "def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):\n \"\"\"Get the root logger.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If `log_file` is specified, a FileHandler will\n also be added.\n\n Args:\n logger_name (str): root logger name. Default: 'basicsr'.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the root logger.\n log_level (int): The root logger level. 
Note that only the process of\n rank 0 is affected, while other processes will set the level to\n \"Error\" and be silent most of the time.\n\n Returns:\n logging.Logger: The root logger.\n \"\"\"\n logger = logging.getLogger(logger_name)\n # if the logger has been initialized, just return it\n if logger_name in initialized_logger:\n return logger\n\n format_str = '%(asctime)s %(levelname)s: %(message)s'\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(format_str))\n logger.addHandler(stream_handler)\n logger.propagate = False\n rank, _ = get_dist_info()\n if rank != 0:\n logger.setLevel('ERROR')\n elif log_file is not None:\n logger.setLevel(log_level)\n # add file handler\n file_handler = logging.FileHandler(log_file, 'w')\n file_handler.setFormatter(logging.Formatter(format_str))\n file_handler.setLevel(log_level)\n logger.addHandler(file_handler)\n initialized_logger[logger_name] = True\n return logger" }, { "identifier": "dequantize_flow", "path": "basicsr/utils/flow_util.py", "snippet": "def dequantize_flow(dx, dy, max_val=0.02, denorm=True):\n \"\"\"Recover from quantized flow.\n\n Args:\n dx (ndarray): Quantized dx.\n dy (ndarray): Quantized dy.\n max_val (float): Maximum value used when quantizing.\n denorm (bool): Whether to multiply flow values with width/height.\n\n Returns:\n ndarray: Dequantized flow.\n \"\"\"\n assert dx.shape == dy.shape\n assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)\n\n dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]\n\n if denorm:\n dx *= dx.shape[1]\n dy *= dx.shape[0]\n flow = np.dstack((dx, dy))\n return flow" }, { "identifier": "DATASET_REGISTRY", "path": "basicsr/utils/registry.py", "snippet": "DATASET_REGISTRY = Registry('dataset')" } ]
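Taken together, the helpers documented in the context above (FileClient, imfrombytes, paired_random_crop, img2tensor) form the usual read-decode-crop path of a BasicSR dataset. The sketch below is a minimal illustration under stated assumptions: the call order of paired_random_crop is inferred from its docstring (img_gts, img_lqs, gt_patch_size, scale, gt_path), and all file paths and sizes are placeholders rather than values from the repository.

# Hypothetical read -> decode -> paired crop flow using the helpers documented above.
# Paths and patch sizes are placeholders; argument order follows the docstrings.
file_client = FileClient('disk')

gt_bytes = file_client.get('datasets/example/gt/000.png')   # placeholder path
lq_bytes = file_client.get('datasets/example/lq/000.png')   # placeholder path

img_gt = imfrombytes(gt_bytes, float32=True)   # HWC, BGR, values in [0, 1]
img_lq = imfrombytes(lq_bytes, float32=True)

# Crop a 256x256 GT patch and the matching 64x64 LQ patch (scale 4).
img_gt, img_lq = paired_random_crop(img_gt, img_lq, 256, 4, 'datasets/example/gt/000.png')

# Convert BGR ndarrays to RGB float tensors for training.
img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)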
import cv2
import math
import time
import os
import os.path as osp
import numpy as np
import random
import torch
from copy import deepcopy
from pathlib import Path
from torch.utils import data as data
from basicsr.data.mmcv_transforms import Clip, UnsharpMasking, RescaleToZeroOne
from basicsr.data.mmcv_transforms import RandomBlur, RandomResize, RandomNoise, RandomJPEGCompression, RandomVideoCompression, DegradationsWithShuffle
from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
from basicsr.data.transforms import augment, single_random_crop, paired_random_crop
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor, tensor2img, imwrite
from basicsr.utils.flow_util import dequantize_flow
from basicsr.utils.registry import DATASET_REGISTRY
13,872
# @DATASET_REGISTRY.register()
class RealVSRRecurrentDataset(data.Dataset):
    """REDS dataset for training recurrent networks.

    The keys are generated from a meta info txt file.
    basicsr/data/meta_info/meta_info_REDS_GT.txt

    Each line contains:
    1. subfolder (clip) name; 2. frame number; 3. image shape, separated by a white space.
    Examples:
    000 100 (720,1280,3)
    001 100 (720,1280,3)
    ...

    Key examples: "000/00000000"
    GT (gt): Ground-Truth;
    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.

    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            meta_info_file (str): Path for meta information file.
            val_partition (str): Validation partition types. 'REDS4' or 'official'.
            io_backend (dict): IO backend type and other kwarg.
            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patched size for gt patches.
            interval_list (list): Interval list for temporal augmentation.
            random_reverse (bool): Random reverse input frames.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
    """

    def __init__(self, opt):
        super(RealVSRRecurrentDataset, self).__init__()
        self.opt = opt
        self.gt_root = Path(opt['dataroot_gt'])
        self.num_frame = opt['num_frame']

        self.keys = []
        with open(opt['meta_info_file'], 'r') as fin:
            for line in fin:
                folder, frame_num, _ = line.split(' ')
                self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])

        # remove the video clips used in validation
        if opt['val_partition'] == 'REDS4':
            val_partition = ['000', '011', '015', '020']
        elif opt['val_partition'] == 'official':
            val_partition = [f'{v:03d}' for v in range(240, 270)]
        else:
            raise ValueError(f'Wrong validation partition {opt["val_partition"]}.'
                             f"Supported ones are ['official', 'REDS4'].")
        if opt['test_mode']:
            self.keys = [v for v in self.keys if v.split('/')[0] in val_partition]
        else:
            self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition]

        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if self.io_backend_opt['type'] == 'lmdb':
            self.is_lmdb = True
            self.io_backend_opt['db_paths'] = [self.gt_root]
            self.io_backend_opt['client_keys'] = ['gt']

        # temporal augmentation configs
        self.interval_list = opt.get('interval_list', [1])
        self.random_reverse = opt.get('random_reverse', False)
        interval_str = ','.join(str(x) for x in self.interval_list)
        logger = get_root_logger()
        logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
                    f'random reverse is {self.random_reverse}.')

        # the first degradation
        self.random_blur_1 = RandomBlur(
            params=opt['degradation_1']['random_blur']['params'],
            keys=opt['degradation_1']['random_blur']['keys']
        )
        self.random_resize_1 = RandomResize(
            params=opt['degradation_1']['random_resize']['params'],
            keys=opt['degradation_1']['random_resize']['keys']
        )
        self.random_noise_1 = RandomNoise(
            params=opt['degradation_1']['random_noise']['params'],
            keys=opt['degradation_1']['random_noise']['keys']
        )
        self.random_jpeg_1 = RandomJPEGCompression(
            params=opt['degradation_1']['random_jpeg']['params'],
            keys=opt['degradation_1']['random_jpeg']['keys']
        )
# @DATASET_REGISTRY.register()
class RealVSRRecurrentDataset(data.Dataset):
    """REDS dataset for training recurrent networks.

    The keys are generated from a meta info txt file.
    basicsr/data/meta_info/meta_info_REDS_GT.txt

    Each line contains:
    1. subfolder (clip) name; 2. frame number; 3. image shape, separated by a white space.
    Examples:
    000 100 (720,1280,3)
    001 100 (720,1280,3)
    ...

    Key examples: "000/00000000"
    GT (gt): Ground-Truth;
    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.

    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            meta_info_file (str): Path for meta information file.
            val_partition (str): Validation partition types. 'REDS4' or 'official'.
            io_backend (dict): IO backend type and other kwarg.
            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patched size for gt patches.
            interval_list (list): Interval list for temporal augmentation.
            random_reverse (bool): Random reverse input frames.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
    """

    def __init__(self, opt):
        super(RealVSRRecurrentDataset, self).__init__()
        self.opt = opt
        self.gt_root = Path(opt['dataroot_gt'])
        self.num_frame = opt['num_frame']

        self.keys = []
        with open(opt['meta_info_file'], 'r') as fin:
            for line in fin:
                folder, frame_num, _ = line.split(' ')
                self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])

        # remove the video clips used in validation
        if opt['val_partition'] == 'REDS4':
            val_partition = ['000', '011', '015', '020']
        elif opt['val_partition'] == 'official':
            val_partition = [f'{v:03d}' for v in range(240, 270)]
        else:
            raise ValueError(f'Wrong validation partition {opt["val_partition"]}.'
                             f"Supported ones are ['official', 'REDS4'].")
        if opt['test_mode']:
            self.keys = [v for v in self.keys if v.split('/')[0] in val_partition]
        else:
            self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition]

        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if self.io_backend_opt['type'] == 'lmdb':
            self.is_lmdb = True
            self.io_backend_opt['db_paths'] = [self.gt_root]
            self.io_backend_opt['client_keys'] = ['gt']

        # temporal augmentation configs
        self.interval_list = opt.get('interval_list', [1])
        self.random_reverse = opt.get('random_reverse', False)
        interval_str = ','.join(str(x) for x in self.interval_list)
        logger = get_root_logger()
        logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
                    f'random reverse is {self.random_reverse}.')

        # the first degradation
        self.random_blur_1 = RandomBlur(
            params=opt['degradation_1']['random_blur']['params'],
            keys=opt['degradation_1']['random_blur']['keys']
        )
        self.random_resize_1 = RandomResize(
            params=opt['degradation_1']['random_resize']['params'],
            keys=opt['degradation_1']['random_resize']['keys']
        )
        self.random_noise_1 = RandomNoise(
            params=opt['degradation_1']['random_noise']['params'],
            keys=opt['degradation_1']['random_noise']['keys']
        )
        self.random_jpeg_1 = RandomJPEGCompression(
            params=opt['degradation_1']['random_jpeg']['params'],
            keys=opt['degradation_1']['random_jpeg']['keys']
        )
self.random_mpeg_1 = RandomVideoCompression(
7
2023-11-30 01:50:29+00:00
16k
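A minimal sketch of the training options that the RealVSRRecurrentDataset constructor above reads from `opt`. Only the key names come from the code and docstring shown in this row; every concrete value, and the contents of the inner `params` dicts and `keys` lists, are illustrative placeholders rather than the repository's actual configuration.

# Hypothetical opt dict for RealVSRRecurrentDataset; all values are placeholders.
opt = {
    'dataroot_gt': 'datasets/REDS/train_sharp',                       # data root for GT clips
    'meta_info_file': 'basicsr/data/meta_info/meta_info_REDS_GT.txt',
    'val_partition': 'REDS4',            # or 'official'
    'test_mode': False,                  # True keeps only the validation clips
    'num_frame': 15,                     # window size for input frames
    'io_backend': {'type': 'disk'},      # 'lmdb' additionally sets db_paths/client_keys
    'interval_list': [1],                # temporal augmentation intervals
    'random_reverse': False,
    'degradation_1': {                   # first-degradation wrappers built in __init__
        'random_blur': {'params': {}, 'keys': ['lqs']},
        'random_resize': {'params': {}, 'keys': ['lqs']},
        'random_noise': {'params': {}, 'keys': ['lqs']},
        'random_jpeg': {'params': {}, 'keys': ['lqs']},
    },
}
# dataset = RealVSRRecurrentDataset(opt)  # builds self.keys from the meta info file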
Institute4FutureHealth/CHA
tasks/types.py
[ { "identifier": "ActivityAnalysis", "path": "tasks/affect/activity_analysis.py", "snippet": "class ActivityAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw activity affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_activity_analysis\"\n chat_name: str = \"AffectActivityAnalysis\"\n description: str = (\n \"Analyze the physical activity data. You must Call this whenever physical activity analysis\"\n \"(e.g., 'average', 'sum', or 'trend') is needed. DON'T rely on your analysis.\"\n \"For example, if the user asks for trends (or variations) in data, you must call this task\"\n )\n dependencies: List[str] = [\"affect_activity_get\"]\n inputs: List[str] = [\n \"It is an string but in json format. It is the output of the $affect_activity_get$\",\n \"analysis_type. It can be one of [$average$, $sum$, $trend$].\",\n ]\n outputs: List[str] = [\n (\n \"The analysis result for steps_count. Look for analysis_type to find the type of analysis. \"\n \"steps_count is the total number of steps registered during the day.\"\n ),\n (\n \"The analysis result for rest_time. Look for analysis_type to find the type of analysis. \"\n \"rest_time is the time (in minutes) during the day spent resting, i.e. sleeping or lying down.\"\n ),\n (\n \"The analysis result for inactive_time. Look for analysis_type to find the type of analysis. \"\n \"inactive_time is the time (in minutes) during the day spent resting, i.e. sitting or standing still.\"\n ),\n (\n \"The analysis result for low_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"low_acitivity_time is the (in minutes) during the day with low intensity activity (e.g. household work).\"\n ),\n (\n \"The analysis result for medimum_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"medimum_acitivity_time is the (in minutes) during the day with medium intensity activity (e.g. walking).\"\n ),\n (\n \"The analysis result for high_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"high_acitivity_time is the (in minutes) during the day with high intensity activity (e.g. running).\"\n ),\n ]\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n if len(inputs) == 0:\n return \"\"\n\n df = pd.read_json(\n StringIO(inputs[0][\"data\"].strip()), orient=\"records\"\n )\n analysis_type = inputs[1].strip()\n if analysis_type == \"average\":\n df = df.drop(\"date\", axis=1) # No average for date!\n df = df.mean().to_frame().T\n elif analysis_type == \"sum\":\n df = df.drop(\"date\", axis=1) # No sum for date!\n df = df.sum().to_frame().T\n elif analysis_type == \"trend\":\n df = self._calculate_slope(df)\n else:\n raise ValueError(\n \"The input analysis type has not been defined!\"\n )\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "ActivityGet", "path": "tasks/affect/activity_get.py", "snippet": "class ActivityGet(Affect):\n \"\"\"\n **Description:**\n\n This tasks gets activity affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_activity_get\"\n chat_name: str = \"AffectActivityGet\"\n description: str = (\n \"Get the physical activity parameters for a specific date or \"\n \"a period (if two dates are provided). 
\"\n \"You must Call $affect_analysis$ whenever physical activity \"\n \"analysis (e.g., 'average', 'sum', or 'trend') is needed. DON'T rely on your analysis\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"user ID in string. It can be refered as user, patient, individual, etc. Start with 'par_' following with a number (e.g., 'par_1').\",\n \"start date of the physical activity data in string with the following format: '%Y-%m-%d'\",\n (\n \"end date of the physical activity data in string with the following format: '%Y-%m-%d'.\"\n \"If there is no end date, the value should be an empty string (i.e., '')\"\n ),\n ]\n outputs: List[str] = [\n \"steps_count is the total number of steps registered during the day.\",\n \"rest_time is the time (in minutes) during the day spent resting, i.e. sleeping or lying down.\",\n \"inactive_time is the time (in minutes) during the day spent resting, i.e. sitting or standing still.\",\n \"low_acitivity_time is the (in minutes) during the day with low intensity activity (e.g. household work).\",\n \"medimum_acitivity_time is the (in minutes) during the day with medium intensity activity (e.g. walking).\",\n \"high_acitivity_time is the (in minutes) during the day with high intensity activity (e.g. running).\",\n ]\n\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = True\n #\n file_name: str = \"activity.csv\"\n device_name: str = \"oura\"\n local_dir: str = \"data/affect\"\n\n columns_to_keep: List[str] = [\n \"date\",\n \"steps\",\n \"rest\",\n \"inactive\",\n \"low\",\n \"medium\",\n \"high\",\n ]\n columns_revised: List[str] = [\n \"date\",\n \"steps_count\",\n \"rest_time\",\n \"inactive_time\",\n \"low_acitivity_time\",\n \"medimum_acitivity_time\",\n \"high_acitivity_time\",\n ]\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n user_id = inputs[0].strip()\n full_dir = os.path.join(\n self.local_dir, user_id, self.device_name\n )\n df = self._get_data(\n local_dir=full_dir,\n file_name=self.file_name,\n start_date=inputs[1].strip(),\n end_date=inputs[2].strip(),\n usecols=self.columns_to_keep,\n )\n df.columns = self.columns_revised\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "SleepAnalysis", "path": "tasks/affect/sleep_analysis.py", "snippet": "class SleepAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw sleep affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_sleep_analysis\"\n chat_name: str = \"AffectSleepAnalysis\"\n description: str = (\n \"Performs trend or average analysis on the provided sleep data. You must Call this whenever sleep trend or average is needed.\"\n \"For example, if the user asks for trends (or variations) in data, you must call this task\"\n )\n dependencies: List[str] = [\"affect_sleep_get\"]\n inputs: List[str] = [\n \"datapipe key to the data\",\n \"analysis_type. It can be one of [average, trend].\",\n ]\n outputs: List[str] = [\n (\n \"The analysis result for total_sleep_time. Look for analysis_type to find the type of analysis. \"\n \"total_sleep_time (in minutes) is Total amount of sleep (a.k.a. sleep duration) registered during the sleep period.\"\n ),\n (\n \"The analysis result for awake_duration. Look for analysis_type to find the type of analysis. 
\"\n \"awake_duration (in minutes) is the total amount of awake time registered during the sleep period.\"\n ),\n (\n \"The analysis result for light_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"light_sleep_duration (in minutes) is the total amount of light (N1 or N2) sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for rem_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"rem_sleep_duration (in minutes) is the total amount of REM sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for deep_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"deep_sleep_duration (in minutes) is the total amount of deep (N3) sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for sleep_onset_latency. Look for analysis_type to find the type of analysis. sleep_onset_latency (in minutes) \"\n \"is the detected latency from bedtime_start to the beginning of the first five minutes of persistent sleep.\"\n ),\n (\n \"The analysis result for midpoint_time_of_sleep. Look for analysis_type to find the type of analysis. \"\n \"midpoint_time_of_sleep (in minutes) is the time from the start of sleep to the midpoint of sleep. The midpoint ignores awake periods.\"\n ),\n (\n \"The analysis result for sleep_efficiency. Look for analysis_type to find the type of analysis. \"\n \"sleep_efficiency is the percentage of the sleep period spent asleep (100% * sleep duration / time in bed).\"\n ),\n (\n \"The analysis result for average_heart_rate. Look for analysis_type to find the type of analysis. \"\n \"average_heart_rate is the average heart rate registered during the sleep period.\"\n ),\n (\n \"The analysis result for minimum_heart_rate. Look for analysis_type to find the type of analysis. \"\n \"minimum_heart_rate is the lowest heart rate (5 minutes sliding average) registered during the sleep period.\"\n ),\n (\n \"The analysis result for rmssd. Look for analysis_type to find the type of analysis. \"\n \"rmssd is the average Root Mean Square of Successive Differences (RMSSD) registered during the sleep period.\"\n ),\n (\n \"The analysis result for average_breathing_rate. Look for analysis_type to find the type of analysis. \"\n \"average_breathing_rate is the average breathing rate registered during the sleep period.\"\n ),\n (\n \"The analysis result for temperature_variation. Look for analysis_type to find the type of analysis. 
\"\n \"temperature_variation is the skin temperature deviation from the long-term temperature average.\"\n ),\n ]\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = True\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n df = pd.read_json(\n StringIO(inputs[0][\"data\"].strip()), orient=\"records\"\n )\n analysis_type = inputs[1].strip()\n if analysis_type == \"average\":\n df = df.drop(\"date\", axis=1) # No average for date!\n df = df.mean().to_frame().T\n elif analysis_type == \"trend\":\n df = self._calculate_slope(df)\n else:\n raise ValueError(\n \"The input analysis type has not been defined!\"\n )\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "SleepGet", "path": "tasks/affect/sleep_get.py", "snippet": "class SleepGet(Affect):\r\n \"\"\"\r\n **Description:**\r\n\r\n This tasks gets sleep affect data for specific patient.\r\n \"\"\"\r\n\r\n name: str = \"affect_sleep_get\"\r\n chat_name: str = \"AffectSleepGet\"\r\n description: str = (\r\n \"Get the sleep parameters for a specific date or \"\r\n \"a period (if two dates are provided). \"\r\n \"You must Call $affect_sleep_analysis$ whenever sleep \"\r\n \"analysis (e.g., 'average' or 'trend') is needed. DON'T rely on your analysis\"\r\n )\r\n dependencies: List[str] = []\r\n inputs: List[str] = [\r\n \"user ID in string. It can be refered as user, patient, individual, etc. Start with 'par_' following with a number (e.g., 'par_1').\",\r\n \"start date of the sleep data in string with the following format: '%Y-%m-%d'\",\r\n (\r\n \"end date of the sleep data in string with the following format: '%Y-%m-%d'. \"\r\n \"If there is no end date, the value should be an empty string (i.e., '')\"\r\n ),\r\n ]\r\n outputs: List[str] = [\r\n \"total_sleep_time (in minutes) is Total amount of sleep (a.k.a. sleep duration) registered during the sleep period.\",\r\n \"awake_duration (in minutes) is the total amount of awake time registered during the sleep period.\",\r\n \"light_sleep_duration (in minutes) is the total amount of light (N1 or N2) sleep registered during the sleep period.\",\r\n \"rem_sleep_duration (in minutes) is the total amount of REM sleep registered during the sleep period.\",\r\n \"deep_sleep_duration (in minutes) is the total amount of deep (N3) sleep registered during the sleep period.\",\r\n \"sleep_onset_latency (in minutes) is detected latency from bedtime_start to the beginning of the first five minutes of persistent sleep.\",\r\n \"midpoint_time_of_sleep (in minutes) is the time from the start of sleep to the midpoint of sleep. 
The midpoint ignores awake periods.\",\r\n \"sleep_efficiency is the percentage of the sleep period spent asleep (100% * sleep duration / time in bed).\",\r\n \"average_heart_rate is the average heart rate registered during the sleep period.\",\r\n \"minimum_heart_rate is the lowest heart rate (5 minutes sliding average) registered during the sleep period.\",\r\n \"rmssd is the average Root Mean Square of Successive Differences (RMSSD) registered during the sleep period.\",\r\n \"average_breathing_rate is the average breathing rate registered during the sleep period.\",\r\n \"temperature_variation is the skin temperature deviation from the long-term temperature average.\",\r\n ]\r\n # False if the output should directly passed back to the planner.\r\n # True if it should be stored in datapipe\r\n output_type: bool = True\r\n #\r\n file_name: str = \"sleep.csv\"\r\n device_name: str = \"oura\"\r\n local_dir: str = \"data/affect\"\r\n columns_to_keep: List[str] = [\r\n \"date\",\r\n \"total\",\r\n \"awake\",\r\n \"light\",\r\n \"rem\",\r\n \"deep\",\r\n \"onset_latency\",\r\n \"midpoint_time\",\r\n \"efficiency\",\r\n \"hr_average\",\r\n \"hr_lowest\",\r\n \"rmssd\",\r\n \"breath_average\",\r\n \"temperature_delta\",\r\n ]\r\n columns_revised: List[str] = [\r\n \"date\",\r\n \"total_sleep_time\",\r\n \"awake_duration\",\r\n \"light_sleep_duration\",\r\n \"rem_sleep_duration\",\r\n \"deep_sleep_duration\",\r\n \"sleep_onset_latency\",\r\n \"midpoint_time_of_sleep\",\r\n \"sleep_efficiency\",\r\n \"average_heart_rate\",\r\n \"minimum_heart_rate\",\r\n \"rmssd\",\r\n \"average_breathing_rate\",\r\n \"temperature_variation\",\r\n ]\r\n variables_in_seconds: List[str] = [\r\n \"total_sleep_time\",\r\n \"awake_duration\",\r\n \"light_sleep_duration\",\r\n \"rem_sleep_duration\",\r\n \"deep_sleep_duration\",\r\n \"sleep_onset_latency\",\r\n \"midpoint_time_of_sleep\",\r\n ]\r\n\r\n def _execute(\r\n self,\r\n inputs: List[Any],\r\n ) -> str:\r\n user_id = inputs[0].strip()\r\n full_dir = os.path.join(\r\n self.local_dir, user_id, self.device_name\r\n )\r\n df = self._get_data(\r\n local_dir=full_dir,\r\n file_name=self.file_name,\r\n start_date=inputs[1].strip(),\r\n end_date=inputs[2].strip(),\r\n usecols=self.columns_to_keep,\r\n )\r\n df.columns = self.columns_revised\r\n df = self._convert_seconds_to_minutes(\r\n df, self.variables_in_seconds\r\n )\r\n df = df.round(2)\r\n json_out = df.to_json(orient=\"records\")\r\n return json_out\r" }, { "identifier": "AskUser", "path": "tasks/ask_user.py", "snippet": "class AskUser(BaseTask):\n \"\"\"\n **Description:**\n\n This task is asking question back to the user and stops planning. When needed, the planner will decide to ask question from user\n and use the user's answer to proceed to the planning.\n\n \"\"\"\n\n name: str = \"ask_user\"\n chat_name: str = \"AskUser\"\n description: str = (\n \"Ask user to provide more information or directly answer user's question. \"\n \"You should try your best using other tools before calling this tool.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"The text returned to user. 
It should be relevant and very detailed based on the latest user's Question.\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n return_direct: bool = True\n\n translator: Any = None #: :meta private:\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"Translate query\"\"\"\n if inputs is None:\n return \"\"\n return inputs[0]\n\n def explain(\n self,\n ) -> str:\n return \"This task simply asks user to provide more information or continue interaction.\"" }, { "identifier": "GoogleTranslate", "path": "tasks/google_translator.py", "snippet": "class GoogleTranslate(BaseTask):\n \"\"\"\n **Description:**\n\n This task uses google translate to autmatically convert from the user language to english or vise versa.\n\n \"\"\"\n\n name: str = \"google_translator\"\n chat_name: str = \"GoogleTranslator\"\n description: str = (\n \"Translates queries between different languages.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"text to be translated\",\n \"destination language\",\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n translator: Any = None #: :meta private:\n\n @model_validator(mode=\"before\")\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"\n Validate that api key and python package exists in environment.\n\n Args:\n cls (object): The class itself.\n values (Dict): The dictionary containing the values for validation.\n Return:\n Dict:The original values.\n Raise:\n ImportError: If the 'playwright' package is not installed.\n\n\n \"\"\"\n\n try:\n from googletrans import Translator\n\n values[\"translator\"] = Translator()\n except ImportError:\n raise ValueError(\n \"Could not import googletrans python package. \"\n \"Please install it with `pip install googletrans-py`.\"\n )\n return values\n\n def _parse_input(\n self,\n input_args: str,\n ) -> List[str]:\n \"\"\"\n Parse the input string into a list of strings.\n\n Args:\n input (str): Input string to be parsed.\n Return:\n List[str]: List of parsed strings.\n\n \"\"\"\n return input_args.split(\"$#\")\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task.\n\n Args:\n input (str): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n \"\"\"\n if len(inputs) < 2:\n return \"\", \"\"\n dest = inputs[1] if inputs[1] is not None else \"en\"\n result = self.translator.translate(inputs[0], dest=dest)\n return result.text, result.src\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n \"\"\"\n\n return \"This task uses google translate to translate between languages\"" }, { "identifier": "Click", "path": "tasks/playwright/click.py", "snippet": "class Click(BaseBrowser):\n \"\"\"\n **Description:**\n\n This code defines a class named Click that inherits from the BaseBrowser class.\n The Click class represents a task related to browser interactions, specifically clicking on an element\n identified by a CSS selector using the Playwright library.\n\n \"\"\"\n\n name: str = \"click\"\n chat_name: str = \"Clicker\"\n description: str = (\n \"Click on an element with the given CSS selector\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"CSS selector for the element to click\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def _selector_effective(self, selector: str) -> str:\n 
\"\"\"\n Get the effective CSS selector considering visibility.\n\n Args:\n selector (str): The original CSS selector.\n Return:\n str: The effective CSS selector.\n\n \"\"\"\n\n if not self.visible_only:\n return selector\n return f\"{selector} >> visible=1\"\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the click task by clicking on an element with the provided CSS selector.\n\n Aegs:\n input (str): The input string containing the CSS selector.\n Return:\n str: A message indicating the success or failure of the click operation.\n\n \"\"\"\n selector = inputs[0]\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n # Navigate to the desired webpage before using this tool\n selector_effective = self._selector_effective(\n selector=selector\n )\n from playwright.sync_api import (\n TimeoutError as PlaywrightTimeoutError,\n )\n\n try:\n page.click(\n selector_effective,\n strict=self.playwright_strict,\n timeout=self.playwright_timeout,\n )\n except PlaywrightTimeoutError:\n return f\"Unable to click on element '{selector}'\"\n return f\"Clicked element '{selector}'\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the purpose of the click task.\n\n Return:\n str: A brief explanation of the task.\n\n \"\"\"\n\n return \"This task clicks on an element in an specific url\"" }, { "identifier": "CurrentWebPage", "path": "tasks/playwright/current_page.py", "snippet": "class CurrentWebPage(BaseBrowser):\n \"\"\"\n **Description:**\n\n This code defines a class named CurrentWebPage that inherits from the BaseBrowser class.\n The CurrentWebPage class represents a task related to browser interactions, specifically retrieving the URL of the current web page.\n\n \"\"\"\n\n name: str = \"current_page\"\n chat_name: str = \"CurrentPage\"\n description: str = \"Returns the URL of the current page\"\n dependencies: List[str] = []\n inputs: List[str] = []\n outputs: List[str] = []\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the task by retrieving the current page from the synchronous browser using\n the get_current_page function and returning its URL.\n\n Args:\n input (str): The input string (not used in this task).\n Return:\n str: The URL of the current web page.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n return str(page.url)\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provides a brief explanation of the current_page task.\n\n Return:\n str: An explanation of the task.\n\n \"\"\"\n\n return \"This task returns the ulr of the current page.\"" }, { "identifier": "ExtractHyperlinks", "path": "tasks/playwright/extract_hyperlinks.py", "snippet": "class ExtractHyperlinks(BaseBrowser):\n \"\"\"\n **Description:**\n\n This task extracts all hyperlinks from the current webpage.\n \"\"\"\n\n name: str = \"extract_hyperlinks\"\n chat_name: str = \"ExtractHyperLinks\"\n description: str = \"Extract all hyperlinks on the current webpage\"\n dependencies: List[str] = []\n inputs: List[str] = [\n \"Boolean: True/False. 
Return absolute URLs instead of relative URLs.\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n @model_validator(mode=\"before\")\n def check_bs_import(cls, values: dict) -> dict:\n \"\"\"\n Check that the arguments are valid.\n\n Args:\n values (Dict): The current attribute values.\n Return:\n Dict: The updated attribute values.\n Raise:\n ImportError: If 'beautifulsoup4' package is not installed.\n\n \"\"\"\n\n try:\n from bs4 import BeautifulSoup # noqa: F401\n except ImportError:\n raise ImportError(\n \"The 'beautifulsoup4' package is required to use this tool.\"\n \" Please install it with 'pip install beautifulsoup4'.\"\n )\n return values\n\n @staticmethod\n def scrape_page(\n page: Any, html_content: str, absolute_urls: bool\n ) -> str:\n \"\"\"\n Scrape hyperlinks from the current webpage.\n\n Args:\n page (Any): The current webpage.\n html_content (str): The HTML content of the webpage.\n absolute_urls (bool): True if absolute URLs should be returned, False otherwise.\n Return:\n str: JSON string containing the extracted hyperlinks.\n\n\n \"\"\"\n\n from urllib.parse import urljoin\n from bs4 import BeautifulSoup\n\n # Parse the HTML content with BeautifulSoup\n soup = BeautifulSoup(html_content, \"lxml\")\n\n # Find all the anchor elements and extract their href attributes\n anchors = soup.find_all(\"a\")\n if absolute_urls:\n base_url = page.url\n links = [\n urljoin(base_url, anchor.get(\"href\", \"\"))\n for anchor in anchors\n ]\n else:\n links = [anchor.get(\"href\", \"\") for anchor in anchors]\n # Return the list of links as a JSON string\n return json.dumps(links)\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the ExtractHyperlinks task.\n\n Args:\n input (str): The input parameter for the task.\n Return:\n str: JSON string containing the extracted hyperlinks.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n html_content = page.content()\n return self.scrape_page(page, html_content, inputs[0])\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a brief explanation of the ExtractHyperlinks task.\n\n Return:\n str: An explanation of the task.\n\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "ExtractText", "path": "tasks/playwright/extract_text.py", "snippet": "class ExtractText(BaseBrowser):\n \"\"\"\n **Description:**\n\n This task extracts all the text from the current webpage.\n \"\"\"\n\n name: str = \"extract_text\"\n chat_name: str = \"ExtractText\"\n description: str = \"Extract all the text on the current webpage\"\n dependencies: List[str] = [\"navigate\"]\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n @model_validator(mode=\"before\")\n def check_acheck_bs_importrgs(cls, values: dict) -> dict:\n \"\"\"\n Check that the arguments are valid.\n\n Args:\n values (Dict): The current attribute values.\n Return:\n Dict: The updated attribute values.\n Raise:\n ImportError: If 'beautifulsoup4' or 'lxml' packages are not installed.\n\n \"\"\"\n\n try:\n from bs4 import BeautifulSoup # noqa: F401\n except ImportError:\n raise ImportError(\n \"The 'beautifulsoup4' package is required to use this tool.\"\n \" Please install it with 'pip install beautifulsoup4'.\"\n )\n\n try:\n import lxml # noqa: F401\n except ImportError:\n raise 
ImportError(\n \"The 'lxml' package is required to use this tool.\"\n \" Please install it with 'pip install lxml'.\"\n )\n return values\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the ExtractText task.\n\n Args:\n input (str): The input parameter for the task.\n Return:\n str: The extracted text from the current webpage.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n from bs4 import BeautifulSoup\n\n self.validate_url(inputs[0].strip())\n\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n\n page = get_current_page(self.sync_browser)\n response = page.goto(inputs[0])\n status = response.status if response else \"unknown\"\n\n if status == 200:\n html_content = page.content()\n # Parse the HTML content with BeautifulSoup\n soup = BeautifulSoup(html_content, \"lxml\")\n\n return \" \".join(text for text in soup.stripped_strings)\n else:\n return (\n \"Error extracting text. The url is wrong. Try again.\"\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the ExtractText task.\n\n Return:\n str: A brief explanation of the ExtractText task.\n\n\n \"\"\"\n\n return \"This task returns the ulr of the current page.\"" }, { "identifier": "GetElements", "path": "tasks/playwright/get_elements.py", "snippet": "class GetElements(BaseBrowser):\n \"\"\"\n **Description:**\n\n The GetElements class is a subclass of BaseBrowser responsible for retrieving elements\n on the current web page that match a given CSS selector.\n \"\"\"\n\n name: str = \"get_elements\"\n chat_name: str = \"GetElements\"\n description: str = \"Retrieve elements in the current web page matching the given CSS selector\"\n dependencies: List[str] = []\n inputs: List[str] = [\n \"CSS selector, such as '*', 'div', 'p', 'a', #id, .classname\",\n \"Set of attributes to retrieve for each element\",\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n def _get_elements(\n page: SyncPage, selector: str, attributes: Sequence[str]\n ) -> List[dict]:\n \"\"\"\n Get elements matching the given CSS selector.\n\n Args:\n page (SyncPage): The current page.\n selector (str): CSS selector to match elements.\n attributes (Sequence[str]): Set of attributes to retrieve for each element.\n Return:\n List[dict]: A list of dictionaries containing the retrieved elements and their attributes.\n\n\n \"\"\"\n\n elements = page.query_selector_all(selector)\n results = []\n for element in elements:\n result = {}\n for attribute in attributes:\n if attribute == \"innerText\":\n val: Optional[str] = element.inner_text()\n else:\n val = element.get_attribute(attribute)\n if val is not None and val.strip() != \"\":\n result[attribute] = val\n if result:\n results.append(result)\n return results\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the GetElements task.\n\n Args:\n input (str): Input string containing CSS selector and attributes.\n Return:\n str: The JSON-formatted string containing the retrieved elements and their attributes.\n 
Raise:\n ValueError: If the synchronous browser is not provided.\n\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n # Navigate to the desired webpage before using this tool\n results = self._get_elements(page, inputs[0], inputs[1])\n return json.dumps(results, ensure_ascii=False)\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the GetElements task.\n\n Return:\n str: A brief explanation of the GetElements task.\n\n \"\"\"\n\n return \"This task gets the elements.\"" }, { "identifier": "Navigate", "path": "tasks/playwright/navigate.py", "snippet": "class Navigate(BaseBrowser):\n \"\"\"\n **Description:**\n\n This class represents a browser navigation task to a specified URL using Playwright.\n \"\"\"\n\n name: str = \"navigate\"\n chat_name: str = \"Navigate\"\n description: str = \"Navigate a browser to the specified URL\"\n dependencies: List[str] = []\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the navigation action in the browser using Playwright.\n\n Args:\n input (str): The input string containing the URL to navigate to.\n Return:\n str: A message indicating whether the navigation was successful, including the URL and status code if successful,\n or an error message if unsuccessful.\n\n \"\"\"\n self.validate_url(inputs[0].strip())\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n response = page.goto(inputs[0])\n status = response.status if response else \"unknown\"\n return (\n f\"Navigating to {inputs[0]} returned status code {status}\"\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n This method provides an explanation of the task.\n\n Return:\n str: A brief explanation of the task, in this case, \"This task extracts all of the hyperlinks.\"\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "NavigateBack", "path": "tasks/playwright/navigate_back.py", "snippet": "class NavigateBack(BaseBrowser):\n \"\"\"\n **Description:**\n\n This class represents a browser navigation task using Playwright.\n \"\"\"\n\n name: str = \"navigate_back\"\n chat_name: str = \"NavigateBack\"\n description: str = (\n \"Navigate back to the previous page in the browser history\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n 
return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the navigation back action in the browser using Playwright.\n\n Args:\n input (str): The input string containing the URL to navigate to.\n Return:\n str: A message indicating whether the navigation was successful, including the URL and status code if successful,\n or an error message if unsuccessful.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n response = page.go_back()\n\n if response:\n return (\n f\"Navigated back to the previous page with URL '{response.url}'.\"\n f\" Status code {response.status}\"\n )\n else:\n return \"Unable to navigate back; no previous page in the history\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n This method provides an explanation of the task.\n\n Return:\n str: A brief explanation of the task, in this case, \"This task extracts all of the hyperlinks.\"\n\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "ReadDataPipe", "path": "tasks/read_from_datapipe.py", "snippet": "class ReadDataPipe(BaseTask):\n \"\"\"\n **Description:**\n\n This code reads raw data stored in datapipe. When different tasks are executed, there are situations that the final data is stored\n in the datapipe when the final called task's output_type=True. In these situations, this task is called to retireve the latest stored data\n to be used for final inference.\n \"\"\"\n\n name: str = \"read_from_datapipe\"\n chat_name: str = \"DataPipeReader\"\n description: str = (\n \"Get the stored information from datapipe to be used to answer user query accurately. \"\n \"This should be called when the final answer is in datapipe.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"the datapipe key in the format $datapipe:key$\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n This simply retrieves data from datapipe.\n\n Args:\n inputs (List[Any]): The datapipe key\n Return:\n str: The raw data along with the instructions.\n\n \"\"\"\n if len(inputs) == 0:\n return \"\"\n return (\n \"The data along with the description for each data is provided. 
\"\n \"Use the data and description to provide a detailed answer regarding the user query.\\n\\n\"\n + json.dumps(inputs[0])\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide an explanation of the task.\n\n Return:\n str: Explanation of the SerpAPI task.\n\n \"\"\"\n return \"This task is to read data from datapipe.\"" }, { "identifier": "SerpAPI", "path": "tasks/serpapi.py", "snippet": "class SerpAPI(BaseTask):\n \"\"\"\n **Description:**\n\n This code defines a class named SerpAPI, which is a specific implementation of the abstract BaseTask class.\n The SerpAPI class represents a task that utilizes the SerpAPI (Google Search API) to perform internet searches\n and retrieve relevant information.\n\n \"\"\"\n\n name: str = \"serpapi\"\n chat_name: str = \"InternetSearchSerp\"\n description: str = (\n \"A low-cost Google Search API.\"\n \"Useful for when you need to answer questions about current events.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"It should be a search query.\"]\n outputs: List[str] = []\n output_type: bool = False\n\n search_engine: Any = None #: :meta private:\n params: Dict = Field(\n default={\n \"engine\": \"google\",\n \"google_domain\": \"google.com\",\n \"gl\": \"us\",\n \"hl\": \"en\",\n }\n )\n serpapi_api_key: Optional[str] = None\n aiosession: Optional[aiohttp.ClientSession] = None\n\n @model_validator(mode=\"before\")\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"\n Validate that api key and python package exists in environment.\n\n Args:\n values (Dict): The dictionary of attribute values.\n Return:\n Dict: The updated dictionary of attribute values.\n Raise:\n ValueError: If the SerpAPI python package is not installed.\n\n \"\"\"\n\n serpapi_api_key = get_from_dict_or_env(\n values, \"serpapi_api_key\", \"SERPAPI_API_KEY\"\n )\n values[\"serpapi_api_key\"] = serpapi_api_key\n try:\n from serpapi import GoogleSearch\n\n values[\"search_engine\"] = GoogleSearch\n except ImportError:\n raise ValueError(\n \"Could not import serpapi python package. \"\n \"Please install it with `pip install google-search-results`.\"\n )\n return values\n\n def get_params(self, query: str) -> Dict[str, str]:\n \"\"\"\n Get parameters for SerpAPI.\n\n Args:\n query (str): The search query.\n Return:\n Dict[str, str]: The parameters for the SerpAPI.\n\n\n \"\"\"\n\n _params = {\n \"api_key\": self.serpapi_api_key,\n \"q\": query,\n }\n params = {**self.params, **_params}\n return params\n\n def results(self, query: str) -> Dict:\n \"\"\"\n Run query through SerpAPI and return the raw result.\n\n Args:\n query (str): The search query.\n Return:\n Dict: The raw result from the SerpAPI.\n\n\n \"\"\"\n\n params = self.get_params(query)\n search = self.search_engine(params)\n res = search.get_dict()\n return res\n\n @staticmethod\n def _process_response(res: Dict) -> str:\n \"\"\"\n Process response from SerpAPI.\n\n Args:\n res (Dict): The raw response from the SerpAPI.\n Return:\n str: Processed information from the SerpAPI response.\n\n \"\"\"\n\n try:\n if \"answer_box\" in res:\n toret = (\n \"url: \"\n + res[\"answer_box\"][\"link\"]\n + \"\\nmetadata: \"\n + res[\"answer_box\"][\"snippet\"]\n )\n else:\n toret = (\n \"url: \"\n + res[\"organic_results\"][0][\"link\"]\n + \"\\nmetadata: \"\n + res[\"organic_results\"][0][\"snippet\"]\n )\n except KeyError:\n return \"Could not get the proper response from the search. 
Try another search query.\"\n return toret\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n Run query through SerpAPI and parse result.\n\n Args:\n input (str): The input, which should be a search query.\n Return:\n str: The parsed result from the SerpAPI.\n\n\n \"\"\"\n if len(inputs) == 0:\n return \"\"\n return self._process_response(self.results(inputs[0]))\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide an explanation of the task.\n\n Return:\n str: Explanation of the SerpAPI task.\n\n \"\"\"\n\n return (\n \"This task searched in the internet using google search engine, returns the url\"\n \"and the first top result of the google search.\"\n )" }, { "identifier": "BaseTask", "path": "tasks/task.py", "snippet": "class BaseTask(BaseModel):\n \"\"\"\n **Description:**\n\n This class is the base implementation for the Tasks. For every new task that you want to create, you should\n inherit from this class and override the attributes and methods based on your task's need. This class defines a base class named BaseTask.\n This class serves as a foundation for defining common properties and behaviors among various tasks in the system.\n\n Attributes:\n name: The name of the task. It should be unique underscore_case to be defined in TaskType. sample_task_name\n chat_name: This is the name that later will be used if needed to mention the tasks inside the chat with the user.\n It should be Camel Case. SampleTaskChatName\n description: The description of the what specifically the task is doing.\n Try to define it as specific as possible to help the Task Planner decide better.\n dependencies: You can put the name of the TaskTypes that this task is dependent on. For example, in stress detection scenario,\n the stress analysis is dependent on the fetch hrv data task. [TaskType.SERPAPI, TASKTYPE.EXTRACT_TEXT]\n inputs: This is the list of descriptions for the inputs that should be provided by the planner.\n For example if your task has two inputs: [\"the first input description\", \"the second input description\"]\n outputs: This is the list of the description of the outputs that the task returns.\n This helps the planner to understand the returned results better and use it as needed.\n For example, if the task returns a list of sleep hours for different sleep states,\n the description helps planner learn which number is related to what state.\n output_type: This indicates if the task result should be stored in the DataPipe or be returned directly to the planner.\n This process will be done in the parse_input and post_execute methods. If needed you can overwrite them.\n return_direct: This indicates if this task should completely interrupt the planning process or not.\n This is needed in cases like when you want to ask a question from user and no further\n planning is needed until the user gives the proper answer (look at ask_user task)\n \"\"\"\n\n name: str\n chat_name: str\n description: str\n dependencies: List[str] = []\n inputs: List[str] = []\n outputs: List[str] = []\n datapipe: DataPipe = None\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = False\n # False if planner should continue. True if after this task the planning should be\n # on pause or stop. 
examples are when you have a task that asks user to provide more information\n return_direct: bool = False\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def name(self):\n return self.name\n\n @property\n def dependencies(self):\n return self.dependencies\n\n @property\n def inputs(self):\n return \", \".join(\n [\n f\"{str(i)}-{input}\"\n for i, input in enumerate(self.inputs)\n ]\n )\n\n @abstractmethod\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task. You should implement this method based on your need.\n This method is called by the **execute** method that provides the parsed inputs to this method.\n\n Args:\n inputs (List[Any]): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n \"\"\"\n\n def _parse_input(\n self,\n input_args: str,\n ) -> List[str]:\n \"\"\"\n Parses the input string into a list of strings. If the input is in format `datapipe:key`,\n the parser will retrieve the data from datapipe before sending it over to the **_execute** method.\n\n Args:\n input_args (str): Input string provided by planner. It should be parsed and return a list of str variables.\n Return:\n List[str]: List of parsed strings. These strings can be converted into desired types inside **_execute** method.\n\n\n \"\"\"\n inputs = input_args.split(\",\")\n return [\n json.loads(\n self.datapipe.retrieve(\n re.search(r\"datapipe:[0-9a-f\\-]{36}\", arg)\n .group()\n .strip()\n .split(\":\")[-1]\n )\n )\n if \"datapipe\" in arg\n else arg.strip()\n for arg in inputs\n ]\n\n def _post_execute(self, result: str = \"\"):\n \"\"\"\n This method is called inside **execute** method after calling **_execute**. The result of **_execute** will be passed to this method\n in case the **output_type** attribute is True, the result will be stored inside the datapipe and the datapipe key is returned to\n the plenner instead of the raw result. This is good practice for times that you have intermediate data (like sleep data over a month)\n and it needs to be passed over to other tasks and the raw result is not immidiately needed.\n This will save a huge amount of tokens and makes sure that the planner will not pass wrong raw data to the tasks.\n\n It is important to note that to make the **DataPipe's** stored data standard and unified, we store the data in the json string\n format that currently contains 'data' and 'description' keys. The 'data' will be the returned data after execution and the 'description'\n is created using the **outputs** attribute of the task. 
Whenever the raw data is returned to the planner, these **outputs** descriptions\n will help the planner understand and learn how to interpret the 'data' to generate the final answer or continue planning.\n\n Args:\n result (str): string containig the task result.\n Return:\n List[str]: List of parsed strings.\n\n \"\"\"\n if self.output_type:\n key = self.datapipe.store(\n json.dumps(\n {\n \"data\": result,\n \"description\": \",\".join(self.outputs),\n }\n )\n )\n return (\n f\"The result of the tool {self.name} is stored in the datapipe with key: $datapipe:{key}$\"\n \" pass this key to other tools to access to the result or call read_from_datapipe to get the raw data.\"\n )\n return result\n\n def execute(self, input_args: str) -> str:\n \"\"\"\n This method is called by the **Orchestrator** which provides the planner provided inputs.\n This method first calls **_parse_input** to parse the inputs and retrieve needed data from the **DataPipe**\n Then **_execute** is called and the parsed inputs are given to this method. Finally the final result of execution is passed to\n **_post_execute** and ith will either be stored inside **DataPipe** or directly returned to the planner to continue planning.\n\n Args:\n input_args (str): Input string provided by planner.\n Return:\n str: The final result of the task execution.\n\n \"\"\"\n inputs = self._parse_input(input_args)\n result = self._execute(inputs)\n return self._post_execute(result)\n\n def get_dict(self) -> str:\n \"\"\"\n Generate a dictionary-like representation of the task.\n\n Return:\n str: String representation of the task dictionary.\n\n\n \"\"\"\n inputs = \",\".join(\n f\"input{i+1}-{word}\" for i, word in enumerate(self.inputs)\n )\n dependencies = \",\".join(\n f\"{i+1}-{word}\"\n for i, word in enumerate(self.dependencies)\n )\n prompt = (\n f\"tool name:{self.name}, description: {self.description}.\"\n )\n if len(self.inputs) > 0:\n prompt += f\"The input to this tool should be comma separated list of data representing: {inputs}\"\n if len(self.dependencies) > 0:\n prompt += f\"\\nThis tool is dependent on the following tools. 
make sure these tools are called first: '{dependencies}'\"\n # prompt += \"\\n\"\n return prompt\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n\n \"\"\"\n\n return \"\"\"\n Sample Explanation\n \"\"\"" }, { "identifier": "TaskType", "path": "tasks/task_types.py", "snippet": "class TaskType(str, Enum):\n SERPAPI = \"serpapi\"\n CLICK = \"click\"\n GET_CURRENT_PAGE = \"current_page\"\n EXTRACT_HYPERLINKS = \"extract_hyperlinks\"\n EXTRACT_TEXT = \"extract_text\"\n GET_ELEMENTS = \"get_elements\"\n NAVIGATE_BACK = \"navigate_back\"\n NAVIGATE = \"navigate\"\n AFFECT_SLEEP_GET = \"affect_sleep_get\"\n AFFECT_ACTIVITY_GET = \"affect_activity_get\"\n AFFECT_SLEEP_ANALYSIS = \"affect_sleep_analysis\"\n AFFECT_ACTIVITY_ANALYSIS = \"affect_activity_analysis\"\n GOOGLE_TRANSLATE = \"google_translate\"\n ASK_USER = \"ask_user\"\n READ_FROM_DATAPIPE = \"read_from_datapipe\"\n TEST_FILE = \"test_file\"" }, { "identifier": "TestFile", "path": "tasks/test_file.py", "snippet": "class TestFile(BaseTask):\n name: str = \"test_file\"\n chat_name: str = \"TestFile\"\n description: str = \"analyzes the image and returns description.\"\n dependencies: List[str] = []\n inputs: List[str] = [\"the image file name\"]\n outputs: List[str] = []\n output_type: bool = False\n return_direct: bool = True\n\n translator: Any = None #: :meta private:\n\n def parse_input(\n self,\n input: str,\n ) -> List[str]:\n \"\"\"\n Parse the input string into a list of strings.\n\n Args:\n input (str): Input string to be parsed.\n Return:\n List[str]: List of parsed strings.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n return input.split(\"$#\")\n\n def execute(\n self,\n input: str,\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task.\n\n Args:\n input (str): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n self.parse_input(input)\n return \"this image is a classification results of a data\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n return \"This task simply asks user to provide more information or continue interaction.\"" } ]
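To make the execution flow documented in the context above concrete, here is a minimal sketch (not part of the dataset record) of how a subclass could plug into BaseTask: the planner hands over a raw comma-separated string, _parse_input resolves any datapipe:<uuid> references through the DataPipe, _execute receives the parsed values, and _post_execute either returns the raw result or stores it and returns a datapipe key. The WordCountTask name, its field values, and the datapipe wiring in the usage comment are illustrative assumptions, not code from the repository.

from typing import Any, List

from tasks.task import BaseTask  # the class quoted in the context above


class WordCountTask(BaseTask):
    # field layout mirrors the TestFile example shown in the context
    name: str = "word_count"
    chat_name: str = "WordCount"
    description: str = "counts the words in the provided text."
    dependencies: List[str] = []
    inputs: List[str] = ["the text to analyze"]
    outputs: List[str] = ["number of words in the text"]
    output_type: bool = False  # set True to route the result through the DataPipe

    def _execute(self, inputs: List[Any]) -> str:
        # inputs arrive already parsed, with datapipe references resolved
        text = str(inputs[0])
        return f"{len(text.split())} words"


# the orchestrator side would then look roughly like (datapipe wiring assumed):
#   task = WordCountTask(datapipe=datapipe)
#   print(task.execute("hello world from the planner"))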
from typing import Dict from typing import Type from tasks.affect import ActivityAnalysis from tasks.affect import ActivityGet from tasks.affect import SleepAnalysis from tasks.affect import SleepGet from tasks.ask_user import AskUser from tasks.google_translator import GoogleTranslate from tasks.playwright import Click from tasks.playwright import CurrentWebPage from tasks.playwright import ExtractHyperlinks from tasks.playwright import ExtractText from tasks.playwright import GetElements from tasks.playwright import Navigate from tasks.playwright import NavigateBack from tasks.read_from_datapipe import ReadDataPipe from tasks.serpapi import SerpAPI from tasks.task import BaseTask from tasks.task_types import TaskType from tasks.test_file import TestFile
13,939
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = { TaskType.SERPAPI: SerpAPI, TaskType.CLICK: Click, TaskType.GET_CURRENT_PAGE: CurrentWebPage, TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks, TaskType.EXTRACT_TEXT: ExtractText,
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = { TaskType.SERPAPI: SerpAPI, TaskType.CLICK: Click, TaskType.GET_CURRENT_PAGE: CurrentWebPage, TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks, TaskType.EXTRACT_TEXT: ExtractText,
TaskType.GET_ELEMENTS: GetElements,
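The cropped_code/all_code preview above (together with the next_line entry) builds the TASK_TO_CLASS mapping; a registry like this is typically consumed as a small factory. The initialize_task helper below is a hypothetical illustration that assumes TASK_TO_CLASS and the imports from the import_statement field are in scope; it is not code from the repository.

from typing import Any


def initialize_task(task_type: TaskType, **kwargs: Any) -> BaseTask:
    # look up the concrete class, e.g. TaskType.GET_ELEMENTS -> GetElements
    task_cls = TASK_TO_CLASS[task_type]
    # kwargs could carry shared resources such as the datapipe or API keys (assumed)
    return task_cls(**kwargs)


# usage sketch:
#   task = initialize_task(TaskType.ASK_USER)
#   print(task.get_dict())  # the one-line tool description handed to the planner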
10
2023-12-02 05:10:44+00:00
16k
Czm369/MixPL
mmdet/models/dense_heads/atss_vlfusion_head.py
[ { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "cat_boxes", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]],\n dim: int = 0) -> Union[Tensor, BaseBoxes]:\n \"\"\"Concatenate boxes with type of tensor or box type.\n\n Args:\n data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors\n or box types need to be concatenated.\n dim (int): The dimension over which the box are concatenated.\n Defaults to 0.\n\n Returns:\n Union[Tensor, :obj`BaseBoxes`]: Concatenated results.\n \"\"\"\n if data_list and isinstance(data_list[0], BaseBoxes):\n return data_list[0].cat(data_list, dim=dim)\n else:\n return torch.cat(data_list, dim=dim)" }, { "identifier": "reduce_mean", "path": "mmdet/utils/dist_utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "InstanceList", "path": "mmdet/utils/typing_utils.py", "snippet": "" }, { "identifier": "filter_scores_and_topk", "path": "mmdet/models/utils/misc.py", "snippet": "def filter_scores_and_topk(scores, score_thr, topk, results=None):\n \"\"\"Filter results using score threshold and topk candidates.\n\n Args:\n scores (Tensor): The scores, shape (num_bboxes, K).\n score_thr (float): The score filter threshold.\n topk (int): The number of topk candidates.\n results (dict or list or Tensor, Optional): The results to\n which the filtering rule is to be applied. The shape\n of each item is (num_bboxes, N).\n\n Returns:\n tuple: Filtered results\n\n - scores (Tensor): The scores after being filtered, \\\n shape (num_bboxes_filtered, ).\n - labels (Tensor): The class labels, shape \\\n (num_bboxes_filtered, ).\n - anchor_idxs (Tensor): The anchor indexes, shape \\\n (num_bboxes_filtered, ).\n - filtered_results (dict or list or Tensor, Optional): \\\n The filtered results. The shape of each item is \\\n (num_bboxes_filtered, N).\n \"\"\"\n valid_mask = scores > score_thr\n scores = scores[valid_mask]\n valid_idxs = torch.nonzero(valid_mask)\n\n num_topk = min(topk, valid_idxs.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n scores, idxs = scores.sort(descending=True)\n scores = scores[:num_topk]\n topk_idxs = valid_idxs[idxs[:num_topk]]\n keep_idxs, labels = topk_idxs.unbind(dim=1)\n\n filtered_results = None\n if results is not None:\n if isinstance(results, dict):\n filtered_results = {k: v[keep_idxs] for k, v in results.items()}\n elif isinstance(results, list):\n filtered_results = [result[keep_idxs] for result in results]\n elif isinstance(results, torch.Tensor):\n filtered_results = results[keep_idxs]\n else:\n raise NotImplementedError(f'Only supports dict or list or Tensor, '\n f'but get {type(results)}.')\n return scores, labels, keep_idxs, filtered_results" }, { "identifier": "select_single_mlvl", "path": "mmdet/models/utils/misc.py", "snippet": "def select_single_mlvl(mlvl_tensors, batch_id, detach=True):\n \"\"\"Extract a multi-scale single image tensor from a multi-scale batch\n tensor based on batch index.\n\n Note: The default value of detach is True, because the proposal gradient\n needs to be detached during the training of the two-stage model. 
E.g\n Cascade Mask R-CNN.\n\n Args:\n mlvl_tensors (list[Tensor]): Batch tensor for all scale levels,\n each is a 4D-tensor.\n batch_id (int): Batch index.\n detach (bool): Whether detach gradient. Default True.\n\n Returns:\n list[Tensor]: Multi-scale single image tensor.\n \"\"\"\n assert isinstance(mlvl_tensors, (list, tuple))\n num_levels = len(mlvl_tensors)\n\n if detach:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id].detach() for i in range(num_levels)\n ]\n else:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id] for i in range(num_levels)\n ]\n return mlvl_tensor_list" }, { "identifier": "unpack_gt_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def unpack_gt_instances(batch_data_samples: SampleList) -> tuple:\n \"\"\"Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based\n on ``batch_data_samples``\n\n Args:\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n Returns:\n tuple:\n\n - batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n - batch_gt_instances_ignore (list[:obj:`InstanceData`]):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n - batch_img_metas (list[dict]): Meta information of each image,\n e.g., image size, scaling factor, etc.\n \"\"\"\n batch_gt_instances = []\n batch_gt_instances_ignore = []\n batch_img_metas = []\n for data_sample in batch_data_samples:\n batch_img_metas.append(data_sample.metainfo)\n batch_gt_instances.append(data_sample.gt_instances)\n if 'ignored_instances' in data_sample:\n batch_gt_instances_ignore.append(data_sample.ignored_instances)\n else:\n batch_gt_instances_ignore.append(None)\n\n return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas" }, { "identifier": "BertEncoderLayer", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class BertEncoderLayer(BertPreTrainedModel):\n \"\"\"A modified version of the `BertLayer` class from the\n `transformers.models.bert.modeling_bert` module.\n\n Args:\n config (:class:`~transformers.BertConfig`):\n The configuration object that\n contains various parameters for the model.\n clamp_min_for_underflow (bool, optional):\n Whether to clamp the minimum value of the hidden states\n to prevent underflow. Defaults to `False`.\n clamp_max_for_overflow (bool, optional):\n Whether to clamp the maximum value of the hidden states\n to prevent overflow. 
Defaults to `False`.\n \"\"\"\n\n def __init__(self,\n config: BertConfig,\n clamp_min_for_underflow: bool = False,\n clamp_max_for_overflow: bool = False):\n super().__init__(config)\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n\n self.attention = BertAttention(config, clamp_min_for_underflow,\n clamp_max_for_overflow)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self, inputs: Dict[str, Dict[str, torch.Tensor]]\n ) -> Dict[str, Dict[str, torch.Tensor]]:\n \"\"\"Applies the BertEncoderLayer to the input features.\"\"\"\n language_dict_features = inputs['lang']\n hidden_states = language_dict_features['hidden']\n attention_mask = language_dict_features['masks']\n\n device = hidden_states.device\n input_shape = hidden_states.size()[:-1]\n extended_attention_mask = self.get_extended_attention_mask(\n attention_mask, input_shape, device)\n\n self_attention_outputs = self.attention(\n hidden_states,\n extended_attention_mask,\n None,\n output_attentions=False,\n past_key_value=None)\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:]\n layer_output = apply_chunking_to_forward(self.feed_forward_chunk,\n self.chunk_size_feed_forward,\n self.seq_len_dim,\n attention_output)\n outputs = (layer_output, ) + outputs\n hidden_states = outputs[0]\n\n language_dict_features['hidden'] = hidden_states\n\n features_dict = {\n 'visual': inputs['visual'],\n 'lang': language_dict_features\n }\n\n return features_dict\n\n def feed_forward_chunk(self, attention_output: Tensor) -> Tensor:\n \"\"\"Applies the intermediate and output layers of the BertEncoderLayer\n to a chunk of the input sequence.\"\"\"\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output" }, { "identifier": "VLFuse", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class VLFuse(nn.Module):\n \"\"\"Early Fusion Module.\n\n Args:\n v_dim (int): Dimension of visual features.\n l_dim (int): Dimension of language features.\n embed_dim (int): The embedding dimension for the attention operation.\n num_heads (int): Number of attention heads.\n dropout (float): Dropout probability.\n drop_path (float): Drop path probability.\n use_checkpoint (bool): Whether to use PyTorch's checkpoint function.\n \"\"\"\n\n def __init__(self,\n v_dim: int = 256,\n l_dim: int = 768,\n embed_dim: int = 2048,\n num_heads: int = 8,\n dropout: float = 0.1,\n drop_path: float = 0.0,\n use_checkpoint: bool = False):\n super().__init__()\n self.use_checkpoint = use_checkpoint\n self.b_attn = BiAttentionBlock(\n v_dim=v_dim,\n l_dim=l_dim,\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=dropout,\n drop_path=drop_path,\n init_values=1.0 / 6.0)\n\n def forward(self, x: dict) -> dict:\n \"\"\"Forward pass of the VLFuse module.\"\"\"\n visual_features = x['visual']\n language_dict_features = x['lang']\n\n if self.use_checkpoint:\n # vf is mean visual_features\n # checkpoint does not allow complex data structures as input,\n # such as list, so we must split them.\n vf0, vf1, vf2, vf3, vf4, language_features = checkpoint.checkpoint(\n self.b_attn, *visual_features,\n language_dict_features['hidden'],\n language_dict_features['masks'])\n else:\n vf0, vf1, vf2, vf3, vf4, language_features = self.b_attn(\n *visual_features, language_dict_features['hidden'],\n language_dict_features['masks'])\n\n 
language_dict_features['hidden'] = language_features\n fused_language_dict_features = language_dict_features\n\n features_dict = {\n 'visual': [vf0, vf1, vf2, vf3, vf4],\n 'lang': fused_language_dict_features\n }\n\n return features_dict" }, { "identifier": "permute_and_flatten", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int,\n W: int) -> Tensor:\n \"\"\"Permute and then flatten a tensor,\n\n from size (N, A, C, H, W) to (N, H * W * A, C).\n\n Args:\n layer (Tensor): Tensor of shape (N, C, H, W).\n N (int): Batch size.\n A (int): Number of attention heads.\n C (int): Number of channels.\n H (int): Height of feature map.\n W (int): Width of feature map.\n\n Returns:\n Tensor: A Tensor of shape (N, H * W * A, C).\n \"\"\"\n layer = layer.view(N, A, C, H, W)\n layer = layer.permute(0, 3, 4, 1, 2)\n layer = layer.reshape(N, -1, C)\n return layer" }, { "identifier": "MAX_CLAMP_VALUE", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "MAX_CLAMP_VALUE = 50000" }, { "identifier": "ATSSHead", "path": "mmdet/models/dense_heads/atss_head.py", "snippet": "class ATSSHead(AnchorHead):\n \"\"\"Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_.\n\n ATSS head structure is similar with FCOS, however ATSS use anchor boxes\n and assign label by Adaptive Training Sample Selection instead max-iou.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n pred_kernel_size (int): Kernel size of ``nn.Conv2d``\n stacked_convs (int): Number of stacking convs of the head.\n conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n convolution layer. Defaults to None.\n norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n layer. Defaults to ``dict(type='GN', num_groups=32,\n requires_grad=True)``.\n reg_decoded_bbox (bool): If true, the regression loss would be\n applied directly on decoded bounding boxes, converting both\n the predicted boxes and regression targets to absolute\n coordinates format. Defaults to False. 
It should be `True` when\n using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss.\n Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True,\n loss_weight=1.0)``.\n init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n list[:obj:`ConfigDict`]): Initialization config dict.\n \"\"\"\n\n def __init__(self,\n num_classes: int,\n in_channels: int,\n pred_kernel_size: int = 3,\n stacked_convs: int = 4,\n conv_cfg: OptConfigType = None,\n norm_cfg: ConfigType = dict(\n type='GN', num_groups=32, requires_grad=True),\n reg_decoded_bbox: bool = True,\n loss_centerness: ConfigType = dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n init_cfg: MultiConfig = dict(\n type='Normal',\n layer='Conv2d',\n std=0.01,\n override=dict(\n type='Normal',\n name='atss_cls',\n std=0.01,\n bias_prob=0.01)),\n **kwargs) -> None:\n self.pred_kernel_size = pred_kernel_size\n self.stacked_convs = stacked_convs\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n super().__init__(\n num_classes=num_classes,\n in_channels=in_channels,\n reg_decoded_bbox=reg_decoded_bbox,\n init_cfg=init_cfg,\n **kwargs)\n\n self.sampling = False\n self.loss_centerness = MODELS.build(loss_centerness)\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers of the head.\"\"\"\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually a tuple of classification scores and bbox prediction\n cls_scores (list[Tensor]): Classification scores for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * 4.\n \"\"\"\n return multi_apply(self.forward_single, x, self.scales)\n\n def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:\n \"\"\"Forward feature of a single scale level.\n\n Args:\n x (Tensor): Features of a single scale level.\n scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n the bbox prediction.\n\n Returns:\n tuple:\n cls_score (Tensor): Cls scores for a single scale level\n the channels number is num_anchors * num_classes.\n bbox_pred (Tensor): Box energies / deltas for a single scale\n level, the channels number is 
num_anchors * 4.\n centerness (Tensor): Centerness for a single scale level, the\n channel number is (N, num_anchors * 1, H, W).\n \"\"\"\n cls_feat = x\n reg_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n cls_score = self.atss_cls(cls_feat)\n # we just follow atss, not apply exp in bbox_pred\n bbox_pred = scale(self.atss_reg(reg_feat)).float()\n centerness = self.atss_centerness(reg_feat)\n return cls_score, bbox_pred, centerness\n\n def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n bbox_pred: Tensor, centerness: Tensor,\n labels: Tensor, label_weights: Tensor,\n bbox_targets: Tensor, avg_factor: float) -> dict:\n \"\"\"Calculate the loss of a single scale level based on the features\n extracted by the detection head.\n\n Args:\n cls_score (Tensor): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W).\n bbox_pred (Tensor): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W).\n anchors (Tensor): Box reference for each scale level with shape\n (N, num_total_anchors, 4).\n labels (Tensor): Labels of each anchors with shape\n (N, num_total_anchors).\n label_weights (Tensor): Label weights of each anchor with shape\n (N, num_total_anchors)\n bbox_targets (Tensor): BBox regression targets of each anchor with\n shape (N, num_total_anchors, 4).\n avg_factor (float): Average factor that is used to average\n the loss. When using sampling method, avg_factor is usually\n the sum of positive and negative priors. When using\n `PseudoSampler`, `avg_factor` is usually equal to the number\n of positive priors.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n\n anchors = anchors.reshape(-1, 4)\n cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n -1, self.cls_out_channels).contiguous()\n bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n centerness = centerness.permute(0, 2, 3, 1).reshape(-1)\n bbox_targets = bbox_targets.reshape(-1, 4)\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n\n # classification loss\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=avg_factor)\n\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n bg_class_ind = self.num_classes\n pos_inds = ((labels >= 0)\n & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n if len(pos_inds) > 0:\n pos_bbox_targets = bbox_targets[pos_inds]\n pos_bbox_pred = bbox_pred[pos_inds]\n pos_anchors = anchors[pos_inds]\n pos_centerness = centerness[pos_inds]\n\n centerness_targets = self.centerness_target(\n pos_anchors, pos_bbox_targets)\n pos_decode_bbox_pred = self.bbox_coder.decode(\n pos_anchors, pos_bbox_pred)\n\n # regression loss\n loss_bbox = self.loss_bbox(\n pos_decode_bbox_pred,\n pos_bbox_targets,\n weight=centerness_targets,\n avg_factor=1.0)\n\n # centerness loss\n loss_centerness = self.loss_centerness(\n pos_centerness, centerness_targets, avg_factor=avg_factor)\n\n else:\n loss_bbox = bbox_pred.sum() * 0\n loss_centerness = centerness.sum() * 0\n centerness_targets = bbox_targets.new_tensor(0.)\n\n return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()\n\n def loss_by_feat(\n self,\n cls_scores: List[Tensor],\n bbox_preds: List[Tensor],\n centernesses: List[Tensor],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n \"\"\"Calculate the loss based on the features extracted by 
the detection\n head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n centernesses (list[Tensor]): Centerness for each scale\n level with shape (N, num_anchors * 1, H, W)\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]\n assert len(featmap_sizes) == self.prior_generator.num_levels\n\n device = cls_scores[0].device\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, batch_img_metas, device=device)\n\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, avg_factor) = cls_reg_targets\n avg_factor = reduce_mean(\n torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n losses_cls, losses_bbox, loss_centerness, \\\n bbox_avg_factor = multi_apply(\n self.loss_by_feat_single,\n anchor_list,\n cls_scores,\n bbox_preds,\n centernesses,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n avg_factor=avg_factor)\n\n bbox_avg_factor = sum(bbox_avg_factor)\n bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n return dict(\n loss_cls=losses_cls,\n loss_bbox=losses_bbox,\n loss_centerness=loss_centerness)\n\n def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor:\n \"\"\"Calculate the centerness between anchors and gts.\n\n Only calculate pos centerness targets, otherwise there may be nan.\n\n Args:\n anchors (Tensor): Anchors with shape (N, 4), \"xyxy\" format.\n gts (Tensor): Ground truth bboxes with shape (N, 4), \"xyxy\" format.\n\n Returns:\n Tensor: Centerness between anchors and gts.\n \"\"\"\n anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n l_ = anchors_cx - gts[:, 0]\n t_ = anchors_cy - gts[:, 1]\n r_ = gts[:, 2] - anchors_cx\n b_ = gts[:, 3] - anchors_cy\n\n left_right = torch.stack([l_, r_], dim=1)\n top_bottom = torch.stack([t_, b_], dim=1)\n centerness = torch.sqrt(\n (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *\n (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))\n assert not torch.isnan(centerness).any()\n return centerness\n\n def get_targets(self,\n anchor_list: List[List[Tensor]],\n valid_flag_list: List[List[Tensor]],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Get targets for ATSS head.\n\n This method is almost the same as `AnchorHead.get_targets()`. 
Besides\n returning the targets as the parent method does, it also returns the\n anchors as the first element of the returned tuple.\n \"\"\"\n num_imgs = len(batch_img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if batch_gt_instances_ignore is None:\n batch_gt_instances_ignore = [None] * num_imgs\n (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n all_bbox_weights, pos_inds_list, neg_inds_list,\n sampling_results_list) = multi_apply(\n self._get_targets_single,\n anchor_list,\n valid_flag_list,\n num_level_anchors_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore,\n unmap_outputs=unmap_outputs)\n # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n # When using sampling method, avg_factor is usually the sum of\n # positive and negative priors. When using `PseudoSampler`,\n # `avg_factor` is usually equal to the number of positive priors.\n avg_factor = sum(\n [results.avg_factor for results in sampling_results_list])\n # split targets to a list w.r.t. multiple levels\n anchors_list = images_to_levels(all_anchors, num_level_anchors)\n labels_list = images_to_levels(all_labels, num_level_anchors)\n label_weights_list = images_to_levels(all_label_weights,\n num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets,\n num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights,\n num_level_anchors)\n return (anchors_list, labels_list, label_weights_list,\n bbox_targets_list, bbox_weights_list, avg_factor)\n\n def _get_targets_single(self,\n flat_anchors: Tensor,\n valid_flags: Tensor,\n num_level_anchors: List[int],\n gt_instances: InstanceData,\n img_meta: dict,\n gt_instances_ignore: Optional[InstanceData] = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Compute regression, classification targets for anchors in a single\n image.\n\n Args:\n flat_anchors (Tensor): Multi-level anchors of the image, which are\n concatenated into a single tensor of shape (num_anchors ,4)\n valid_flags (Tensor): Multi level valid flags of the image,\n which are concatenated into a single tensor of\n shape (num_anchors,).\n num_level_anchors (List[int]): Number of anchors of each scale\n level.\n gt_instances (:obj:`InstanceData`): Ground truth of instance\n annotations. It usually includes ``bboxes`` and ``labels``\n attributes.\n img_meta (dict): Meta information for current image.\n gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n to be ignored during training. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n unmap_outputs (bool): Whether to map outputs back to the original\n set of anchors.\n\n Returns:\n tuple: N is the number of total anchors in the image.\n labels (Tensor): Labels of all anchors in the image with shape\n (N,).\n label_weights (Tensor): Label weights of all anchor in the\n image with shape (N,).\n bbox_targets (Tensor): BBox targets of all anchors in the\n image with shape (N, 4).\n bbox_weights (Tensor): BBox weights of all anchors in the\n image with shape (N, 4)\n pos_inds (Tensor): Indices of positive anchor with shape\n (num_pos,).\n neg_inds (Tensor): Indices of negative anchor with shape\n (num_neg,).\n sampling_result (:obj:`SamplingResult`): Sampling results.\n \"\"\"\n inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg['allowed_border'])\n if not inside_flags.any():\n raise ValueError(\n 'There is no valid anchor inside the image boundary. Please '\n 'check the image size and anchor sizes, or set '\n '``allowed_border`` to -1 to skip the condition.')\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n pred_instances = InstanceData(priors=anchors)\n assign_result = self.assigner.assign(pred_instances,\n num_level_anchors_inside,\n gt_instances, gt_instances_ignore)\n\n sampling_result = self.sampler.sample(assign_result, pred_instances,\n gt_instances)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n if self.reg_decoded_bbox:\n pos_bbox_targets = sampling_result.pos_gt_bboxes\n else:\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_priors, sampling_result.pos_gt_bboxes)\n\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n\n labels[pos_inds] = sampling_result.pos_gt_labels\n if self.train_cfg['pos_weight'] <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg['pos_weight']\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = unmap(anchors, num_total_anchors, inside_flags)\n labels = unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds, sampling_result)\n\n def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n \"\"\"Get the number of valid anchors in every level.\"\"\"\n\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside" } ]
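Several of the helpers quoted in this context (notably permute_and_flatten) are pure shape manipulations, and a quick toy check makes the intended layout explicit. The snippet below copies the helper verbatim from the snippet above and only adds an illustrative shape assertion; the tensor sizes are made up.

import torch


def permute_and_flatten(layer, N, A, C, H, W):
    # identical to mmdet/models/utils/vlfuse_helper.py as quoted above
    layer = layer.view(N, A, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)
    layer = layer.reshape(N, -1, C)
    return layer


feat = torch.randn(2, 1 * 256, 20, 20)                 # N=2, A=1 prior, C=256, H=W=20
queries = permute_and_flatten(feat, 2, 1, 256, 20, 20)
assert queries.shape == (2, 20 * 20 * 1, 256)          # one C-dim query per prior per location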
import copy import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List, Optional, Sequence, Tuple, Union from mmcv.cnn import Scale from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModel from mmengine.structures import InstanceData from torch import Tensor from transformers import BertConfig from mmdet.registry import MODELS from mmdet.structures.bbox import cat_boxes from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..utils import (BertEncoderLayer, VLFuse, filter_scores_and_topk, permute_and_flatten, select_single_mlvl, unpack_gt_instances) from ..utils.vlfuse_helper import MAX_CLAMP_VALUE from .atss_head import ATSSHead
10,921
use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: BertConfig = None def convert_grounding_to_cls_scores(logits: Tensor, positive_maps: List[dict]) -> Tensor: """Convert logits to class scores.""" assert len(positive_maps) == logits.shape[0] # batch size scores = torch.zeros(logits.shape[0], logits.shape[1], len(positive_maps[0])).to(logits.device) if positive_maps is not None: if all(x == positive_maps[0] for x in positive_maps): # only need to compute once positive_map = positive_maps[0] for label_j in positive_map: scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(positive_map[label_j] )].mean(-1) else: for i, positive_map in enumerate(positive_maps): for label_j in positive_map: scores[i, :, label_j - 1] = logits[ i, :, torch.LongTensor(positive_map[label_j])].mean(-1) return scores class Conv3x3Norm(nn.Module): """Conv3x3 and norm.""" def __init__(self, in_channels: int, out_channels: int, stride: int, groups: int = 1, use_dcn: bool = False, norm_type: Optional[Union[Sequence, str]] = None): super().__init__() if use_dcn: self.conv = ModulatedDeformConv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) if isinstance(norm_type, Sequence): assert len(norm_type) == 2 assert norm_type[0] == 'gn' gn_group = norm_type[1] norm_type = norm_type[0] if norm_type == 'bn': bn_op = nn.BatchNorm2d(out_channels) elif norm_type == 'gn': bn_op = nn.GroupNorm( num_groups=gn_group, num_channels=out_channels) if norm_type is not None: self.bn = bn_op else: self.bn = None def forward(self, x, **kwargs): x = self.conv(x, **kwargs) if self.bn: x = self.bn(x) return x class DyReLU(nn.Module): """Dynamic ReLU.""" def __init__(self, in_channels: int, out_channels: int, expand_ratio: int = 4): super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.expand_ratio = expand_ratio self.out_channels = out_channels self.fc = nn.Sequential( nn.Linear(in_channels, in_channels // expand_ratio), nn.ReLU(inplace=True), nn.Linear(in_channels // expand_ratio, out_channels * self.expand_ratio), nn.Hardsigmoid(inplace=True)) def forward(self, x) -> Tensor: x_out = x b, c, h, w = x.size() x = self.avg_pool(x).view(b, c) x = self.fc(x).view(b, -1, 1, 1) a1, b1, a2, b2 = torch.split(x, self.out_channels, dim=1) a1 = (a1 - 0.5) * 2 + 1.0 a2 = (a2 - 0.5) * 2 b1 = b1 - 0.5 b2 = b2 - 0.5 out = torch.max(x_out * a1 + b1, x_out * a2 + b2) return out class DyConv(nn.Module): """Dynamic Convolution.""" def __init__(self, conv_func: Callable, in_channels: int, out_channels: int, use_dyfuse: bool = True, use_dyrelu: bool = False, use_dcn: bool = False): super().__init__() self.dyconvs = nn.ModuleList() self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 2)) if use_dyfuse: self.attnconv = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)) self.h_sigmoid = nn.Hardsigmoid(inplace=True) else: self.attnconv = None if use_dyrelu: self.relu = DyReLU(in_channels, out_channels) else: self.relu = nn.ReLU() if use_dcn: self.offset = nn.Conv2d( in_channels, 27, kernel_size=3, stride=1, padding=1) else: self.offset = None self.init_weights() def init_weights(self): for m in self.dyconvs.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: 
m.bias.data.zero_() if self.attnconv is not None: for m in self.attnconv.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, inputs: dict) -> dict: visual_feats = inputs['visual'] out_vis_feats = [] for level, feature in enumerate(visual_feats): offset_conv_args = {} if self.offset is not None: offset_mask = self.offset(feature) offset = offset_mask[:, :18, :, :] mask = offset_mask[:, 18:, :, :].sigmoid() offset_conv_args = dict(offset=offset, mask=mask) temp_feats = [self.dyconvs[1](feature, **offset_conv_args)] if level > 0: temp_feats.append(self.dyconvs[2](visual_feats[level - 1], **offset_conv_args)) if level < len(visual_feats) - 1: temp_feats.append( F.upsample_bilinear( self.dyconvs[0](visual_feats[level + 1], **offset_conv_args), size=[feature.size(2), feature.size(3)])) mean_feats = torch.mean( torch.stack(temp_feats), dim=0, keepdim=False) if self.attnconv is not None: attn_feat = [] res_feat = [] for feat in temp_feats: res_feat.append(feat) attn_feat.append(self.attnconv(feat)) res_feat = torch.stack(res_feat) spa_pyr_attn = self.h_sigmoid(torch.stack(attn_feat)) mean_feats = torch.mean( res_feat * spa_pyr_attn, dim=0, keepdim=False) out_vis_feats.append(mean_feats) out_vis_feats = [self.relu(item) for item in out_vis_feats] features_dict = {'visual': out_vis_feats, 'lang': inputs['lang']} return features_dict class VLFusionModule(BaseModel): """Visual-lang Fusion Module.""" def __init__(self, in_channels: int, feat_channels: int, num_base_priors: int, early_fuse: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', use_dyrelu: bool = True, use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) 
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits
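As a sanity check on the grounding-to-classification conversion defined in the all_code above, the condensed copy below keeps only the fast path (all positive_maps identical) and runs it on a toy batch; the numbers and the map are illustrative only.

from typing import List

import torch
from torch import Tensor


def convert_grounding_to_cls_scores(logits: Tensor, positive_maps: List[dict]) -> Tensor:
    # condensed from the definition above: average token logits over each class's token span
    scores = torch.zeros(logits.shape[0], logits.shape[1], len(positive_maps[0]))
    positive_map = positive_maps[0]  # fast path: identical maps for every image
    for label_j in positive_map:
        scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(positive_map[label_j])].mean(-1)
    return scores


logits = torch.tensor([[[0.2, 0.8, 0.4, 0.0],           # 1 image, 2 queries, 4 text tokens
                        [0.6, 0.6, 0.1, 0.9]]])
positive_maps = [{1: [0, 1], 2: [3]}]                   # class 1 <- tokens 0,1; class 2 <- token 3
print(convert_grounding_to_cls_scores(logits, positive_maps))
# tensor([[[0.5000, 0.0000],
#          [0.6000, 0.9000]]])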
@MODELS.register_module()
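The next_line above is the @MODELS.register_module() decorator that precedes the head class in atss_vlfusion_head.py (ATSSVLFusionHead in upstream mmdet). With the MODELS registry quoted in the context, registration lets the head be instantiated from a config dict via MODELS.build. The sketch below only illustrates that mechanism; the config keys are assumptions and not a complete working config, since in practice the head is built as part of a full detector config.

from mmdet.registry import MODELS

head_cfg = dict(
    type='ATSSVLFusionHead',   # the registered class name is resolved at build time
    num_classes=80,            # illustrative kwargs, not a full config
    in_channels=256,
)
head = MODELS.build(head_cfg)  # roughly equivalent to ATSSVLFusionHead(num_classes=80, in_channels=256)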
0
2023-11-30 08:58:00+00:00
16k
SEU-ProactiveSecurity-Group/MalPurifier
core/defense/md_at_ma.py
[ { "identifier": "Max", "path": "core/attack/max.py", "snippet": "class Max(BaseAttack):\n \"\"\"\n Max攻击:迭代地从多个攻击方法中选择结果。\n\n 参数\n --------\n @param attack_list: List, 已实例化的攻击对象的列表。\n @param varepsilon: Float, 用于判断收敛性的标量。\n \"\"\"\n\n def __init__(self, attack_list, varepsilon=1e-20,\n is_attacker=True, oblivion=False, kappa=1., manipulation_x=None, omega=None, device=None):\n \"\"\"\n 构造函数\n\n 参数:\n - attack_list: 已实例化的攻击对象的列表,至少应该有一个攻击方法。\n - varepsilon: 用于判断收敛性的标量,默认值为1e-20。\n - is_attacker: Bool, 表示是否为攻击者,默认为True。\n - oblivion: Bool, 一个布尔标志(其功能在这里并未详细说明),默认为False。\n - kappa: Float, 一个浮点数参数,默认为1。\n - manipulation_x: 可能与数据的处理或操纵有关,具体用途未详细说明。\n - omega: 参数omega的具体用途未详细说明。\n - device: 设备,例如'cuda'或'cpu',用于执行计算。\n\n 注意:\n - 在初始化过程中,会首先检查`attack_list`是否包含至少一个攻击对象。\n \"\"\"\n super(Max, self).__init__(is_attacker, oblivion, kappa, manipulation_x, omega, device) # 调用父类的构造函数\n assert len(attack_list) > 0, '至少需要一个攻击方法。' # 确保提供了至少一个攻击对象\n self.attack_list = attack_list # 设置攻击列表\n self.varepsilon = varepsilon # 设置varepsilon值\n self.device = device # 设置计算设备\n\n def perturb(self, model, x, label=None, steps_max=5, min_lambda_=1e-5, max_lambda_=1e5, verbose=False):\n \"\"\"\n 扰动节点特征\n\n 参数\n -----------\n @param model: 受害者模型。\n @param x: torch.FloatTensor, 形状为[batch_size, vocab_dim]的特征向量。\n @param label: torch.LongTensor, 真实标签。\n @param steps_max: Integer, 最大的迭代次数。\n @param min_lambda_: float, 平衡对手检测器的重要性(如果存在)。\n @param max_lambda_: float, 同上。\n @param verbose: Boolean, 是否打印详细日志。\n\n 返回值\n --------\n adv_x: 扰动后的数据。\n \"\"\"\n\n # 判断输入数据是否有效\n if x is None or x.shape[0] <= 0:\n return []\n\n # 将模型设为评估模式,主要是为了禁用一些在训练模式下的特殊层,比如Dropout\n model.eval()\n\n # 获取输入数据x在当前模型下的损失和完成状态\n with torch.no_grad():\n loss, done = self.get_scores(model, x, label)\n\n # 存储当前的损失为前一次的损失\n pre_loss = loss\n\n # 获取输入数据的数量以及其他的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n\n # 初始化攻击样本为输入数据的拷贝\n adv_x = x.detach().clone()\n\n # 初始化停止标志,用于表示哪些样本已经完成了攻击\n stop_flag = torch.zeros(n, dtype=torch.bool, device=self.device)\n\n # 开始主循环,进行多次迭代以改进攻击效果\n for t in range(steps_max):\n # 计算还未完成攻击的样本数量\n num_sample_red = n - torch.sum(stop_flag)\n \n # 如果所有样本都已完成攻击,结束循环\n if num_sample_red <= 0:\n break\n\n # 获取那些还未完成攻击的样本的真实标签\n red_label = label[~stop_flag]\n pertbx = []\n\n # 对于攻击方法列表中的每种攻击方法,尝试对数据进行扰动\n for attack in self.attack_list:\n # 确保每种攻击方法都实现了perturb方法\n assert 'perturb' in type(attack).__dict__.keys()\n\n # 对于某些特定的攻击方法,在第二次及以后的迭代中取消随机化\n if t > 0 and 'use_random' in attack.__dict__.keys():\n attack.use_random = False\n\n # 对于名为\"Orthogonal\"的攻击方法,进行特殊处理\n if 'Orthogonal' in type(attack).__name__:\n pertbx.append(attack.perturb(model=model, x=adv_x[~stop_flag], label=red_label))\n else:\n pertbx.append(attack.perturb(model=model, x=adv_x[~stop_flag], label=red_label,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n ))\n # 将所有攻击方法产生的扰动数据合并\n pertbx = torch.vstack(pertbx)\n\n\n # 不需要计算梯度,提高计算效率\n with torch.no_grad():\n # 将真实标签复制若干次以匹配所有的攻击列表\n red_label_ext = torch.cat([red_label] * len(self.attack_list))\n \n # 获取每种攻击方法产生的损失值和成功状态\n loss, done = self.get_scores(model, pertbx, red_label_ext)\n \n # 调整损失和成功状态的形状以方便后续计算\n loss = loss.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n done = done.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n \n # 判断哪些样本至少有一种攻击方法成功\n success_flag = torch.any(done, dim=-1)\n \n # 对于没有成功的样本,将其标记为1以进行后续处理\n done[~torch.any(done, dim=-1)] = 1\n \n # 调整损失值,对于成功的攻击方法,损失值保持不变;对于失败的,损失值变为最小值\n loss = (loss * done.to(torch.float)) + 
torch.min(loss) * (~done).to(torch.float)\n \n # 调整扰动数据的形状以方便后续计算\n pertbx = pertbx.reshape(len(self.attack_list), num_sample_red, *red_n).permute([1, 0, *red_ind])\n \n # 选择造成最大损失的扰动数据\n _, indices = loss.max(dim=-1)\n adv_x[~stop_flag] = pertbx[torch.arange(num_sample_red), indices]\n \n # 获取选中的扰动数据的损失值\n a_loss = loss[torch.arange(num_sample_red), indices]\n \n # 复制当前的停止标志\n pre_stop_flag = stop_flag.clone()\n \n # 更新停止标志,如果损失值变化很小或者某种攻击方法成功,则停止迭代\n stop_flag[~stop_flag] = (torch.abs(pre_loss[~stop_flag] - a_loss) < self.varepsilon) | success_flag\n \n # 更新前一个损失值\n pre_loss[~pre_stop_flag] = a_loss\n\n # 如果需要打印日志\n if verbose:\n # 评估最终的扰动数据的成功状态\n with torch.no_grad():\n _, done = self.get_scores(model, adv_x, label)\n # 打印攻击成功率\n logger.info(f\"max: attack effectiveness {done.sum().item() / x.size()[0] * 100}%.\")\n\n # 返回最终的扰动数据\n return adv_x\n\n\n def perturb_dae(self, predict_model, purifier, x, label=None, steps_max=5, min_lambda_=1e-5, max_lambda_=1e5, verbose=False, oblivion=False):\n \"\"\"\n 扰动节点特征\n\n 参数\n -----------\n @param model: 受害者模型。\n @param x: torch.FloatTensor, 形状为[batch_size, vocab_dim]的特征向量。\n @param label: torch.LongTensor, 真实标签。\n @param steps_max: Integer, 最大的迭代次数。\n @param min_lambda_: float, 平衡对手检测器的重要性(如果存在)。\n @param max_lambda_: float, 同上。\n @param verbose: Boolean, 是否打印详细日志。\n\n 返回值\n --------\n adv_x: 扰动后的数据。\n \"\"\"\n\n # 判断输入数据是否有效\n if x is None or x.shape[0] <= 0:\n return []\n\n # 将模型设为评估模式,主要是为了禁用一些在训练模式下的特殊层,比如Dropout\n predict_model.eval()\n purifier.eval()\n\n # 获取输入数据x在当前模型下的损失和完成状态\n with torch.no_grad():\n if not oblivion:\n purified_x = purifier(x.detach().clone().float()).to(torch.double)\n else:\n purified_x = x.detach().clone()\n loss, done = self.get_scores(predict_model, purified_x, label)\n\n # 存储当前的损失为前一次的损失\n pre_loss = loss\n\n # 获取输入数据的数量以及其他的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n\n # 初始化攻击样本为输入数据的拷贝\n adv_x = x.detach().clone()\n\n # 初始化停止标志,用于表示哪些样本已经完成了攻击\n stop_flag = torch.zeros(n, dtype=torch.bool, device=self.device)\n\n # 开始主循环,进行多次迭代以改进攻击效果\n for t in range(steps_max):\n # 计算还未完成攻击的样本数量\n num_sample_red = n - torch.sum(stop_flag)\n \n # 如果所有样本都已完成攻击,结束循环\n if num_sample_red <= 0:\n break\n\n # 获取那些还未完成攻击的样本的真实标签\n red_label = label[~stop_flag]\n pertbx = []\n\n # 对于攻击方法列表中的每种攻击方法,尝试对数据进行扰动\n for attack in self.attack_list:\n # 确保每种攻击方法都实现了perturb方法\n assert 'perturb' in type(attack).__dict__.keys()\n\n # 对于某些特定的攻击方法,在第二次及以后的迭代中取消随机化\n if t > 0 and 'use_random' in attack.__dict__.keys():\n attack.use_random = False\n\n # 对于名为\"Orthogonal\"的攻击方法,进行特殊处理\n if 'Orthogonal' in type(attack).__name__:\n pertbx.append(attack.perturb_dae(predict_model=predict_model, purifier=purifier, x=adv_x[~stop_flag], label=red_label, oblivion=oblivion))\n else:\n pertbx.append(attack.perturb_dae(model=predict_model, purifier=purifier, x=adv_x[~stop_flag], label=red_label,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n oblivion=oblivion\n ))\n\n # 将所有攻击方法产生的扰动数据合并\n pertbx = torch.vstack(pertbx)\n\n\n # 不需要计算梯度,提高计算效率\n with torch.no_grad():\n # 将真实标签复制若干次以匹配所有的攻击列表\n red_label_ext = torch.cat([red_label] * len(self.attack_list))\n \n # 获取每种攻击方法产生的损失值和成功状态\n if not oblivion:\n purified_pertbx = purifier(pertbx.detach().clone().float()).to(torch.double)\n else:\n purified_pertbx = pertbx.detach().clone()\n\n loss, done = self.get_scores(predict_model, purified_pertbx, red_label_ext)\n \n # 调整损失和成功状态的形状以方便后续计算\n loss = loss.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n done = 
done.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n \n # 判断哪些样本至少有一种攻击方法成功\n success_flag = torch.any(done, dim=-1)\n \n # 对于没有成功的样本,将其标记为1以进行后续处理\n done[~torch.any(done, dim=-1)] = 1\n \n # 调整损失值,对于成功的攻击方法,损失值保持不变;对于失败的,损失值变为最小值\n loss = (loss * done.to(torch.float)) + torch.min(loss) * (~done).to(torch.float)\n \n # 调整扰动数据的形状以方便后续计算\n pertbx = pertbx.reshape(len(self.attack_list), num_sample_red, *red_n).permute([1, 0, *red_ind])\n \n # 选择造成最大损失的扰动数据\n _, indices = loss.max(dim=-1)\n adv_x[~stop_flag] = pertbx[torch.arange(num_sample_red), indices]\n \n # 获取选中的扰动数据的损失值\n a_loss = loss[torch.arange(num_sample_red), indices]\n \n # 复制当前的停止标志\n pre_stop_flag = stop_flag.clone()\n \n # 更新停止标志,如果损失值变化很小或者某种攻击方法成功,则停止迭代\n stop_flag[~stop_flag] = (torch.abs(pre_loss[~stop_flag] - a_loss) < self.varepsilon) | success_flag\n \n # 更新前一个损失值\n pre_loss[~pre_stop_flag] = a_loss\n\n # 如果需要打印日志\n if verbose:\n # 评估最终的扰动数据的成功状态\n with torch.no_grad():\n purified_adv_x = purifier(adv_x.detach().clone().float()).to(torch.double)\n _, done = self.get_scores(predict_model, purified_adv_x, label)\n # 打印攻击成功率\n logger.info(f\"max: attack effectiveness {done.sum().item() / x.size()[0] * 100}%.\")\n\n # 返回最终的扰动数据\n return adv_x\n\n\n # 这个get_scores函数的主要目的是计算扰动数据在给定模型上的损失值,并判断模型对这些扰动数据的预测是否成功完成。\n # 对于具有检测器功能的模型,还会考虑模型的额外输出来决定预测的完成状态。\n def get_scores(self, model, pertb_x, label):\n \"\"\"\n 获取扰动数据在模型上的损失值和预测标签的完成状态。\n\n 参数:\n @param model: 模型对象,即受攻击的目标模型。\n @param pertb_x: torch.Tensor,扰动后的数据。\n @param label: torch.Tensor,扰动数据的真实标签。\n\n 返回:\n - loss_no_reduction: 每个样本的损失值(无降维处理)。\n - done: Boolean Tensor,表示模型对每个样本的预测是否成功完成。\n \"\"\"\n # 判断模型是否具有检测器功能,如果有,则获取模型的两个输出:logits_f 和 prob_g。\n if hasattr(model, 'is_detector_enabled'):\n logits_f, prob_g = model.forward(pertb_x)\n else:\n # 如果模型没有检测器功能,只获取一个输出logits_f。\n logits_f = model.forward(pertb_x)\n\n # 使用交叉熵计算每个样本的损失值\n ce = F.cross_entropy(logits_f, label, reduction='none')\n\n # 获取模型的预测标签\n y_pred = logits_f.argmax(1)\n\n # 如果模型具有检测器功能且不处于\"oblivion\"模式,则进行特殊处理。\n # 使用模型的输出prob_g来判断是否成功完成了预测。\n if hasattr(model, 'is_detector_enabled') and (not self.oblivion):\n tau = model.get_tau_sample_wise(y_pred)\n loss_no_reduction = -prob_g\n done = (y_pred != label) & (prob_g <= tau)\n else:\n # 如果模型没有检测器功能或处于\"oblivion\"模式,则使用交叉熵损失来判断是否成功完成了预测。\n loss_no_reduction = ce\n done = y_pred != label\n\n return loss_no_reduction, done" }, { "identifier": "StepwiseMax", "path": "core/attack/stepwise_max.py", "snippet": "class StepwiseMax(BaseAttack):\n \"\"\"\n Stepwise max攻击方法,这是一个结合了pgd l1, pgd l2, 和 pgd linf三种攻击方式的方法。\n\n 参数\n ----------\n @param use_random: bool类型,是否使用随机的起始点。\n @param rounding_threshold: float类型,用于四舍五入实数的阈值。\n @param is_attacker: bool类型,是否扮演攻击者角色(注意:防御者执行对抗性训练)。\n @param oblivion: bool类型,是否知道敌手指示器。\n @param kappa: 攻击信心度。\n @param manipulation_x: 可操作性。\n @param omega: 与每个api相对应的互依赖api的索引。\n @param device: 设备,'cpu'或'cuda'。\n\n \"\"\"\n\n def __init__(self, use_random=False, rounding_threshold=0.5,\n is_attacker=True, oblivion=False, kappa=1., manipulation_x=None, omega=None, device=None):\n super(StepwiseMax, self).__init__(is_attacker, oblivion, kappa, manipulation_x, omega, device)\n \n # 是否使用随机起点\n self.use_random = use_random\n \n # 断言确保四舍五入阈值在(0, 1)之间\n assert 0 < rounding_threshold < 1\n \n # 设置四舍五入的阈值\n self.round_threshold = rounding_threshold\n \n # lambda_用于正则化,通常与优化的损失一起使用\n self.lambda_ = 1.\n\n def perturb_dae(self, model, purifier, x, label=None,\n steps=100,\n step_check=1,\n sl_l1=1.,\n sl_l2=1.,\n sl_linf=0.01,\n 
min_lambda_=1e-5,\n max_lambda_=1e5,\n is_score_round=True,\n base=10.,\n verbose=False,\n oblivion=False):\n \"\"\"\n 对模型进行增强攻击。\n\n @param model: PyTorch模型,待攻击目标。\n @param x: Tensor, 原始输入数据。\n @param label: Tensor或None, 输入数据对应的标签。\n @param steps: int, 攻击的总步数。\n @param step_check: int, 检查间隔,即多少步进行一次检查。\n @param sl_l1: float, L1范数的步长。\n @param sl_l2: float, L2范数的步长。\n @param sl_linf: float, Linf范数的步长。\n @param min_lambda_: float, lambda的最小值。\n @param max_lambda_: float, lambda的最大值。\n @param is_score_round: Boolean, 是否对分数进行四舍五入。\n @param base: float, 基数。\n @param verbose: Boolean, 是否输出详细信息。\n \"\"\"\n # torch.manual_seed(int(random.random() * 100)) # 设置随机种子\n # 参数校验\n assert 0 < min_lambda_ <= max_lambda_\n assert steps >= 0 and (step_check >= 1) and 1 >= sl_l1 > 0 and sl_l2 >= 0 and sl_linf >= 0\n \n model.eval() # 将模型设置为评估模式\n purifier.eval()\n \n # 根据模型是否具有某种属性来设置lambda的初值\n if hasattr(model, 'is_detector_enabled'):\n self.lambda_ = min_lambda_\n else:\n self.lambda_ = max_lambda_\n \n # 如果不是攻击者,从预定义的步骤中随机选择一个\n if not self.is_attacker:\n step_checks = [1, 10, 25, 50]\n step_check = random.choice(step_checks)\n \n # 计算每个小步骤中需要的迭代次数\n mini_steps = [step_check] * (steps // step_check)\n mini_steps = mini_steps + [steps % step_check] if steps % step_check != 0 else mini_steps\n \n # 获取输入的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n \n adv_x = x.detach().clone() # 获取输入数据的副本\n while self.lambda_ <= max_lambda_:\n pert_x_cont = None\n prev_done = None\n for i, mini_step in enumerate(mini_steps):\n with torch.no_grad():\n # 如果是第一步并且启用了随机初始化,那么获取一个随机的起始点\n if i == 0:\n adv_x = get_x0(adv_x, rounding_threshold=self.round_threshold, is_sample=True)\n # 计算损失和完成标志\n if not oblivion:\n purified_adv = purifier(adv_x.detach().clone().float()).to(torch.double)\n else:\n purified_adv = adv_x.detach().clone()\n _, done = self.get_loss(model, purified_adv, label, self.lambda_)\n \n # print(\"done:\", done)\n \n # 如果所有的都完成了,就退出循环\n if torch.all(done):\n break\n \n # 对于那些没有完成的数据,重新计算扰动\n # print(\"i:\", i)\n if i == 0:\n # print(\"~done:\", (~done))\n adv_x[~done] = x[~done]\n prev_done = done.clone()\n else:\n if (adv_x[~done]).shape[0] == (pert_x_cont[~done[~prev_done]]).shape[0]:\n adv_x[~done] = pert_x_cont[~done[~prev_done]]\n else:\n updated_mask = (~done) & (~prev_done[:len(done)])\n num_to_select = updated_mask.sum().item()\n selected_perturbations = pert_x_cont[:num_to_select]\n adv_x[updated_mask] = selected_perturbations\n\n prev_done = done.clone() \n \n # 对那些未完成的数据进行真正的扰动\n num_sample_red = torch.sum(~done).item()\n pert_x_l1, pert_x_l2, pert_x_linf = self._perturb_dae(model, purifier, adv_x[~done], label[~done],\n mini_step,\n sl_l1,\n sl_l2,\n sl_linf,\n lambda_=self.lambda_,\n oblivion=False\n )\n # print(\"pert_x_l1, pert_x_l2, pert_x_linf\", pert_x_l1, pert_x_l2, pert_x_linf)\n # 不计算梯度地执行下列操作\n with torch.no_grad():\n # 构造一个包含三种扰动的列表\n pertb_x_list = [pert_x_linf, pert_x_l2, pert_x_l1]\n n_attacks = len(pertb_x_list) # 获取攻击的数量(即3)\n pertbx = torch.vstack(pertb_x_list) # 垂直堆叠这三种扰动\n label_ext = torch.cat([label[~done]] * n_attacks) # 扩展标签列表,使其与扰动列表长度匹配\n\n # 如果不是攻击者并且不需要四舍五入得分,则获取得分\n # 否则,先对扰动进行四舍五入,再获取得分\n if not oblivion:\n purified_pertbx = purifier(pertbx.detach().clone().float()).to(torch.double)\n else:\n purified_pertbx = pertbx.detach().clone()\n if (not self.is_attacker) and (not is_score_round): \n scores, _done = self.get_scores(model, purified_pertbx, label_ext)\n else:\n scores, _done = self.get_scores(model, 
round_x(purified_pertbx, self.round_threshold), label_ext)\n \n # 如果得分的最大值大于0,则设置为该值,否则设置为0\n max_v = scores.amax() if scores.amax() > 0 else 0.\n scores[_done] += max_v # 对完成的得分增加max_v\n\n # 重新整形扰动和得分张量,以便后续操作\n pertbx = pertbx.reshape(n_attacks, num_sample_red, *red_n).permute([1, 0, *red_ind])\n scores = scores.reshape(n_attacks, num_sample_red).permute(1, 0)\n\n # 从得分张量中获取最大得分及其索引\n _2, s_idx = scores.max(dim=-1)\n # 使用索引从扰动张量中选择具有最高误导性的扰动\n pert_x_cont = pertbx[torch.arange(num_sample_red), s_idx]\n # print(\"pert_x_cont.shape\", pert_x_cont.shape)\n # 更新经过扰动的数据adv_x\n adv_x[~done] = pert_x_cont if not self.is_attacker else round_x(pert_x_cont, self.round_threshold)\n \n # 更新lambda值以便于下一次循环\n self.lambda_ *= base\n # 如果lambda值检查失败,则中断循环\n if not self.check_lambda(model):\n break\n # 如果是攻击者,对最终的扰动结果进行四舍五入\n if self.is_attacker:\n adv_x = round_x(adv_x, self.round_threshold)\n \n # 不计算梯度地获取最后的损失和完成标志\n with torch.no_grad():\n purified_adv = purifier(adv_x.detach().clone().float()).to(torch.double)\n _, done = self.get_loss(model, purified_adv, label, self.lambda_)\n # 如果设置了详细输出,打印攻击效果的百分比\n if verbose:\n logger.info(f\"step-wise max: attack effectiveness {done.sum().item() / done.size()[0] * 100:.3f}%.\")\n # 返回扰动后的数据\n return adv_x\n\n\n def perturb(self, model, x, label=None,\n steps=100,\n step_check=1,\n sl_l1=1.,\n sl_l2=1.,\n sl_linf=0.01,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n is_score_round=True,\n base=10.,\n verbose=False):\n \"\"\"\n 对模型进行增强攻击。\n\n @param model: PyTorch模型,待攻击目标。\n @param x: Tensor, 原始输入数据。\n @param label: Tensor或None, 输入数据对应的标签。\n @param steps: int, 攻击的总步数。\n @param step_check: int, 检查间隔,即多少步进行一次检查。\n @param sl_l1: float, L1范数的步长。\n @param sl_l2: float, L2范数的步长。\n @param sl_linf: float, Linf范数的步长。\n @param min_lambda_: float, lambda的最小值。\n @param max_lambda_: float, lambda的最大值。\n @param is_score_round: Boolean, 是否对分数进行四舍五入。\n @param base: float, 基数。\n @param verbose: Boolean, 是否输出详细信息。\n \"\"\"\n # torch.manual_seed(int(random.random() * 100)) # 设置随机种子\n # 参数校验\n assert 0 < min_lambda_ <= max_lambda_\n assert steps >= 0 and (step_check >= 1) and 1 >= sl_l1 > 0 and sl_l2 >= 0 and sl_linf >= 0\n \n model.eval() # 将模型设置为评估模式\n \n # 根据模型是否具有某种属性来设置lambda的初值\n if hasattr(model, 'is_detector_enabled'):\n self.lambda_ = min_lambda_\n else:\n self.lambda_ = max_lambda_\n \n # 如果不是攻击者,从预定义的步骤中随机选择一个\n if not self.is_attacker:\n step_checks = [1, 10, 25, 50]\n step_check = random.choice(step_checks)\n \n # 计算每个小步骤中需要的迭代次数\n mini_steps = [step_check] * (steps // step_check)\n mini_steps = mini_steps + [steps % step_check] if steps % step_check != 0 else mini_steps\n \n # 获取输入的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n \n adv_x = x.detach().clone() # 获取输入数据的副本\n while self.lambda_ <= max_lambda_:\n pert_x_cont = None\n prev_done = None\n for i, mini_step in enumerate(mini_steps):\n with torch.no_grad():\n # 如果是第一步并且启用了随机初始化,那么获取一个随机的起始点\n if i == 0:\n adv_x = get_x0(adv_x, rounding_threshold=self.round_threshold, is_sample=True)\n _, done = self.get_loss(model, adv_x, label, self.lambda_)\n \n # print(\"done:\", done)\n \n # 如果所有的都完成了,就退出循环\n if torch.all(done):\n break\n \n # 对于那些没有完成的数据,重新计算扰动\n # print(\"i:\", i)\n if i == 0:\n # print(\"~done:\", (~done))\n adv_x[~done] = x[~done]\n prev_done = done.clone()\n else:\n if (adv_x[~done]).shape[0] == (pert_x_cont[~done[~prev_done]]).shape[0]:\n adv_x[~done] = pert_x_cont[~done[~prev_done]]\n else:\n updated_mask = (~done) & (~prev_done[:len(done)])\n num_to_select = 
updated_mask.sum().item()\n selected_perturbations = pert_x_cont[:num_to_select]\n adv_x[updated_mask] = selected_perturbations\n\n prev_done = done.clone() \n \n # 对那些未完成的数据进行真正的扰动\n num_sample_red = torch.sum(~done).item()\n pert_x_l1, pert_x_l2, pert_x_linf = self._perturb(model, adv_x[~done], label[~done],\n mini_step,\n sl_l1,\n sl_l2,\n sl_linf,\n lambda_=self.lambda_\n )\n # print(\"pert_x_l1, pert_x_l2, pert_x_linf\", pert_x_l1, pert_x_l2, pert_x_linf)\n # 不计算梯度地执行下列操作\n with torch.no_grad():\n # 构造一个包含三种扰动的列表\n pertb_x_list = [pert_x_linf, pert_x_l2, pert_x_l1]\n n_attacks = len(pertb_x_list) # 获取攻击的数量(即3)\n pertbx = torch.vstack(pertb_x_list) # 垂直堆叠这三种扰动\n label_ext = torch.cat([label[~done]] * n_attacks) # 扩展标签列表,使其与扰动列表长度匹配\n\n # 如果不是攻击者并且不需要四舍五入得分,则获取得分\n # 否则,先对扰动进行四舍五入,再获取得分\n if (not self.is_attacker) and (not is_score_round):\n scores, _done = self.get_scores(model, pertbx, label_ext)\n else:\n scores, _done = self.get_scores(model, round_x(pertbx, self.round_threshold), label_ext)\n \n # 如果得分的最大值大于0,则设置为该值,否则设置为0\n max_v = scores.amax() if scores.amax() > 0 else 0.\n scores[_done] += max_v # 对完成的得分增加max_v\n\n # 重新整形扰动和得分张量,以便后续操作\n pertbx = pertbx.reshape(n_attacks, num_sample_red, *red_n).permute([1, 0, *red_ind])\n scores = scores.reshape(n_attacks, num_sample_red).permute(1, 0)\n\n # 从得分张量中获取最大得分及其索引\n _2, s_idx = scores.max(dim=-1)\n # 使用索引从扰动张量中选择具有最高误导性的扰动\n pert_x_cont = pertbx[torch.arange(num_sample_red), s_idx]\n # print(\"pert_x_cont.shape\", pert_x_cont.shape)\n # 更新经过扰动的数据adv_x\n adv_x[~done] = pert_x_cont if not self.is_attacker else round_x(pert_x_cont, self.round_threshold)\n \n # 更新lambda值以便于下一次循环\n self.lambda_ *= base\n # 如果lambda值检查失败,则中断循环\n if not self.check_lambda(model):\n break\n # 如果是攻击者,对最终的扰动结果进行四舍五入\n if self.is_attacker:\n adv_x = round_x(adv_x, self.round_threshold)\n \n # 不计算梯度地获取最后的损失和完成标志\n with torch.no_grad():\n _, done = self.get_loss(model, adv_x, label, self.lambda_)\n # 如果设置了详细输出,打印攻击效果的百分比\n if verbose:\n logger.info(f\"step-wise max: attack effectiveness {done.sum().item() / done.size()[0] * 100:.3f}%.\")\n # 返回扰动后的数据\n return adv_x\n\n def _perturb(self, model, x, label=None,\n steps=1,\n step_length_l1=1.,\n step_length_l2=0.5,\n step_length_linf=0.01,\n lambda_=1.,\n ):\n \"\"\"\n 对节点的特征向量进行扰动\n\n 参数\n -----------\n @param model: 受害者模型\n @param x: torch.FloatTensor, 节点特征向量(每个表示一个图中的API出现次数)形状为 [batch_size, vocab_dim]\n @param label: torch.LongTensor, 真实的标签\n @param steps: 整数, 迭代的最大次数\n @param step_length_l1: 每次迭代的步长,L1范数\n @param step_length_l2: 每次迭代的步长,L2范数\n @param step_length_linf: 每次迭代的步长,Linf范数\n @param lambda_: 浮点数, 惩罚因子\n \"\"\"\n if x is None or x.shape[0] <= 0:\n return []\n \n self.lambda_ = lambda_\n \n # 确保L1步长在[0,1]之间\n assert 0 <= step_length_l1 <= 1, \"期望在 [0,1] 之间的实数值,但得到 {}\".format(step_length_l1)\n model.eval()\n adv_x = x.detach()\n \n def one_iteration(_adv_x, norm_type):\n # 基于当前的扰动输入来计算梯度\n if \"rnn\" in model.model_save_path:\n model.train()\n if \"lstm\" in model.model_save_path:\n model.train() \n var_adv_x = torch.autograd.Variable(_adv_x, requires_grad=True) # 将_adv_x转换为一个可以进行自动梯度计算的变量\n loss, done = self.get_loss(model, var_adv_x, label, self.lambda_) # 获取模型在扰动输入上的损失\n grads = torch.autograd.grad(loss.mean(), var_adv_x, allow_unused=True)\n if grads[0] is None:\n grad = torch.zeros_like(var_adv_x)\n else:\n grad = grads[0].data\n\n # 寻找允许的位置来插入和移除API\n pos_insertion = (_adv_x <= 0.5) * 1 * (_adv_x >= 0.) 
# 寻找API的可插入位置:特征值在0和0.5之间\n grad4insertion = (grad > 0) * pos_insertion * grad # 根据梯度正值计算插入API的梯度\n\n pos_removal = (_adv_x > 0.5) * 1 # 寻找API的可移除位置:特征值大于0.5\n grad4removal = (grad <= 0) * (pos_removal & self.manipulation_x) * grad # 根据梯度负值计算移除API的梯度\n\n if self.is_attacker:\n # 对于攻击者,处理那些互相依赖的API\n checking_nonexist_api = (pos_removal ^ self.omega) & self.omega # 检查不存在的API\n grad4removal[:, self.api_flag] += torch.sum(grad * checking_nonexist_api, dim=-1, keepdim=True) # 考虑API之间的关系,调整移除API的梯度\n\n # 合并插入和移除的梯度\n grad = grad4removal + grad4insertion\n\n # 根据不同的范数类型,计算扰动值\n if norm_type == 'linf':\n perturbation = torch.sign(grad) # 计算梯度符号来获取无穷范数扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_linf * perturbation, min=0., max=1.) # 应用扰动并确保结果在[0,1]范围内\n\n elif norm_type == 'l2':\n l2norm = torch.linalg.norm(grad, dim=-1, keepdim=True) # 计算L2范数\n perturbation = torch.minimum(\n torch.tensor(1., dtype=_adv_x.dtype, device=_adv_x.device),\n grad / l2norm\n ) # 计算L2范数下的扰动方向\n perturbation = torch.where(torch.isnan(perturbation), 0., perturbation) # 处理NaN值\n perturbation = torch.where(torch.isinf(perturbation), 1., perturbation) # 处理Inf值\n if self.is_attacker:\n min_val = torch.amin(perturbation, dim=-1, keepdim=True).clamp_(max=0.)\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * torch.abs(min_val) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l2 * perturbation, min=0., max=1.)\n\n elif norm_type == 'l1':\n val, idx = torch.abs(grad).topk(int(1. / step_length_l1), dim=-1) # 获取梯度的绝对值的top-k值和相应的索引\n perturbation = F.one_hot(idx, num_classes=_adv_x.shape[-1]).sum(dim=1) # 根据索引计算L1范数下的扰动方向\n perturbation = torch.sign(grad) * perturbation # 使用梯度的符号来调整扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l1 * perturbation, min=0., max=1.)\n\n else:\n raise NotImplementedError # 如果范数类型不在L1、L2、Linf中,则引发异常\n\n\n # 为每种范数执行迭代\n adv_x_l1 = adv_x.clone()\n for t in range(steps):\n adv_x_l1 = one_iteration(adv_x_l1, norm_type='l1')\n \n adv_x_l2 = adv_x.clone()\n for t in range(steps):\n adv_x_l2 = one_iteration(adv_x_l2, norm_type='l2')\n \n adv_x_linf = adv_x.clone()\n for t in range(steps):\n adv_x_linf = one_iteration(adv_x_linf, norm_type='linf')\n \n return adv_x_l1, adv_x_l2, adv_x_linf\n\n\n def _perturb_dae(self, model, purifier, x, label=None,\n steps=1,\n step_length_l1=1.,\n step_length_l2=0.5,\n step_length_linf=0.01,\n lambda_=1.,\n oblivion=False):\n \"\"\"\n 对节点的特征向量进行扰动\n\n 参数\n -----------\n @param model: 受害者模型\n @param x: torch.FloatTensor, 节点特征向量(每个表示一个图中的API出现次数)形状为 [batch_size, vocab_dim]\n @param label: torch.LongTensor, 真实的标签\n @param steps: 整数, 迭代的最大次数\n @param step_length_l1: 每次迭代的步长,L1范数\n @param step_length_l2: 每次迭代的步长,L2范数\n @param step_length_linf: 每次迭代的步长,Linf范数\n @param lambda_: 浮点数, 惩罚因子\n \"\"\"\n if x is None or x.shape[0] <= 0:\n return []\n \n self.lambda_ = lambda_\n \n # 确保L1步长在[0,1]之间\n assert 0 <= step_length_l1 <= 1, \"期望在 [0,1] 之间的实数值,但得到 {}\".format(step_length_l1)\n model.eval()\n adv_x = x.detach()\n \n\n def one_iteration(_adv_x, norm_type):\n # 基于当前的扰动输入来计算梯度\n var_adv_x = torch.autograd.Variable(_adv_x, requires_grad=True) # 将_adv_x转换为一个可以进行自动梯度计算的变量\n if not oblivion:\n purified_var = 
purifier(var_adv_x.detach().clone().float()).to(torch.double)\n else:\n purified_var = var_adv_x.detach().clone()\n loss, done = self.get_loss(model, purified_var, label, self.lambda_) # 获取模型在扰动输入上的损失\n grads = torch.autograd.grad(loss.mean(), var_adv_x, allow_unused=True)\n if grads[0] is None:\n grad = torch.zeros_like(var_adv_x)\n else:\n grad = grads[0].data\n\n # 寻找允许的位置来插入和移除API\n pos_insertion = (_adv_x <= 0.5) * 1 * (_adv_x >= 0.) # 寻找API的可插入位置:特征值在0和0.5之间\n grad4insertion = (grad > 0) * pos_insertion * grad # 根据梯度正值计算插入API的梯度\n\n pos_removal = (_adv_x > 0.5) * 1 # 寻找API的可移除位置:特征值大于0.5\n grad4removal = (grad <= 0) * (pos_removal & self.manipulation_x) * grad # 根据梯度负值计算移除API的梯度\n\n if self.is_attacker:\n # 对于攻击者,处理那些互相依赖的API\n checking_nonexist_api = (pos_removal ^ self.omega) & self.omega # 检查不存在的API\n grad4removal[:, self.api_flag] += torch.sum(grad * checking_nonexist_api, dim=-1, keepdim=True) # 考虑API之间的关系,调整移除API的梯度\n\n # 合并插入和移除的梯度\n grad = grad4removal + grad4insertion\n\n # 根据不同的范数类型,计算扰动值\n if norm_type == 'linf':\n perturbation = torch.sign(grad) # 计算梯度符号来获取无穷范数扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_linf * perturbation, min=0., max=1.) # 应用扰动并确保结果在[0,1]范围内\n\n elif norm_type == 'l2':\n l2norm = torch.linalg.norm(grad, dim=-1, keepdim=True) # 计算L2范数\n perturbation = torch.minimum(\n torch.tensor(1., dtype=_adv_x.dtype, device=_adv_x.device),\n grad / l2norm\n ) # 计算L2范数下的扰动方向\n perturbation = torch.where(torch.isnan(perturbation), 0., perturbation) # 处理NaN值\n perturbation = torch.where(torch.isinf(perturbation), 1., perturbation) # 处理Inf值\n if self.is_attacker:\n min_val = torch.amin(perturbation, dim=-1, keepdim=True).clamp_(max=0.)\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * torch.abs(min_val) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l2 * perturbation, min=0., max=1.)\n\n elif norm_type == 'l1':\n val, idx = torch.abs(grad).topk(int(1. 
/ step_length_l1), dim=-1) # 获取梯度的绝对值的top-k值和相应的索引\n perturbation = F.one_hot(idx, num_classes=_adv_x.shape[-1]).sum(dim=1) # 根据索引计算L1范数下的扰动方向\n perturbation = torch.sign(grad) * perturbation # 使用梯度的符号来调整扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l1 * perturbation, min=0., max=1.)\n\n else:\n raise NotImplementedError # 如果范数类型不在L1、L2、Linf中,则引发异常\n\n\n # 为每种范数执行迭代\n adv_x_l1 = adv_x.clone()\n for t in range(steps):\n adv_x_l1 = one_iteration(adv_x_l1, norm_type='l1')\n \n adv_x_l2 = adv_x.clone()\n for t in range(steps):\n adv_x_l2 = one_iteration(adv_x_l2, norm_type='l2')\n \n adv_x_linf = adv_x.clone()\n for t in range(steps):\n adv_x_linf = one_iteration(adv_x_linf, norm_type='linf')\n \n return adv_x_l1, adv_x_l2, adv_x_linf\n\n def get_scores(self, model, pertb_x, label):\n # 如果模型有 'is_detector_enabled' 这个属性\n if hasattr(model, 'is_detector_enabled'):\n # 获取模型的输出,logits_f 是模型的原始输出,prob_g 是一个概率值\n logits_f, prob_g = model.forward(pertb_x)\n else:\n # 如果模型没有 'is_detector_enabled' 这个属性,只获取模型的原始输出\n logits_f = model.forward(pertb_x)\n\n # 获取预测的类别\n y_pred = logits_f.argmax(1)\n \n # 计算交叉熵损失\n ce = F.cross_entropy(logits_f, label, reduction='none')\n \n # 如果模型有 'is_detector_enabled' 这个属性,并且 self.oblivion 为 False\n if hasattr(model, 'is_detector_enabled') and (not self.oblivion):\n # 获取样本的阈值\n tau = model.get_tau_sample_wise(y_pred)\n # 计算损失,加入了 prob_g 这个概率值的惩罚项\n loss_no_reduction = ce - self.lambda_ * prob_g\n # 判断预测是否错误,并且 prob_g 是否小于等于阈值 tau\n done = (y_pred != label) & (prob_g <= tau)\n else:\n # 如果没有 'is_detector_enabled' 这个属性或 self.oblivion 为 True,损失仍然是交叉熵损失\n loss_no_reduction = ce\n # 判断预测是否错误\n done = y_pred != label\n\n # 返回损失值和判断结果c\n return loss_no_reduction, done" }, { "identifier": "config", "path": "config.py", "snippet": "def parser_config():" }, { "identifier": "utils", "path": "tools/utils.py", "snippet": "ENC_KEY = 'cab228a122d3486bac7fab148e8b5aba'\n MSG = \"No such directory or file {} exists!\".format(sample_dir)\n MSG = \"A directory or a list of paths are allowed!\"\ndef pool_initializer():\ndef retrive_files_set(base_dir, dir_ext, file_ext):\n def get_file_name(root_dir, file_ext):\ndef check_dir(sample_dir):\ndef dump_joblib(data, path):\ndef read_joblib(path):\ndef load_json(json_path):\ndef dump_json(obj_dict, file_path):\ndef dump_pickle(data, path, use_gzip=False):\ndef read_pickle(path, use_gzip=False):\ndef dump_pickle_frd_space(data, path):\ndef read_pickle_frd_space(path):\ndef dump_list_of_lists(data, path):\ndef read_list_of_lists(path):\ndef mkdir(target):\ndef read_txt(path, mode='r'):\ndef dump_txt(data_str, path, mode='w'):\ndef read_file_by_fileinput(file_path, inplace=True):\n def __init__(self, manager, use_cache=True):\n def is_cached(self, key):\n def reset(self):\n def get(self, key):\n def cache(self, key, img, lbl):\ndef build_kwargs(keys, arg_dict):\ndef inverse_kwargs(vars):\ndef save_args(fout, args):\ndef load_args(fout):\ndef get_group_args(args, args_parser, title):\ndef tensor_coo_sp_to_ivs(sparse_tensor):\ndef ivs_to_tensor_coo_sp(ivs, device='cpu'):\ndef sp_to_symmetric_sp(sparse_mx):\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\ndef to_tensor(feature_x=None, labels=None, device='cpu'):\n def _to_torch_tensor(mat):\ndef to_device(feature_x=None, labels=None, device='cpu'):\ndef psn(x_tensor, prob, lower_value=0., upper_value=1.):\n def __init__(self):\n def __call__(self, module):\ndef 
round_x(x, alpha=0.5):\ndef get_x0(x, rounding_threshold=0.5, is_sample=False):\ndef or_tensors(x_1, x_2):\ndef xor_tensors(x_1, x_2):\ndef get_mal_data(x_batch, y_batch):\ndef get_mal_ben_data(x_batch, y_batch):\ndef java_class_name2smali_name(cls):\ndef remove_duplicate(components):\ndef crypt_identifier(idf, seed=2345):\n def md5_transform():\ndef random_string(code):\n def sha1_transform():\ndef string_on_code(code):\n def md5_transform():\ndef random_name(seed=2345, code='abc'):\ndef apply_encryption(base_string):\ndef get_sha256(file_path):\nclass SimplifyClass:\nclass NonnegWeightConstraint(object):" } ]
import os.path as path import random import time import torch import torch.optim as optim import numpy as np from core.attack.max import Max from core.attack.stepwise_max import StepwiseMax from config import config, logging, ErrorHandler from tools import utils
14,036
""" max adversarial training framework """ from __future__ import absolute_import from __future__ import division from __future__ import print_function
""" max adversarial training framework """ from __future__ import absolute_import from __future__ import division from __future__ import print_function
logger = logging.getLogger('core.defense.max_adv_training')
2
2023-11-27 02:00:23+00:00
16k
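The StepwiseMax context snippet in the record above runs PGD-style updates under l1, l2, and linf constraints and then, per sample, keeps whichever candidate perturbation yields the largest loss. The following is a rough, self-contained sketch of that selection step only, not the repository's API; the function name and toy shapes are invented for illustration.

import torch

def pick_worst_case(candidates: torch.Tensor, losses: torch.Tensor) -> torch.Tensor:
    # candidates: (n_attacks, batch, dim); losses: (n_attacks, batch).
    # Returns (batch, dim): for each sample, the candidate with the largest loss.
    losses = losses.permute(1, 0)             # (batch, n_attacks), as in the snippet's permute()
    candidates = candidates.permute(1, 0, 2)  # (batch, n_attacks, dim)
    _, idx = losses.max(dim=-1)               # index of the most damaging attack per sample
    return candidates[torch.arange(candidates.size(0)), idx]

if __name__ == "__main__":
    torch.manual_seed(0)
    cands = torch.rand(3, 4, 8)               # e.g. linf / l2 / l1 candidates for 4 samples
    losses = torch.rand(3, 4)
    print(pick_worst_case(cands, losses).shape)  # torch.Size([4, 8])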
Vali-98/XTTS-RVC-UI
rvc.py
[ { "identifier": "SynthesizerTrnMs256NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs256NSFsid_nono", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n 
segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers 
= n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid_nono", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = 
upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "VC", "path": "vc_infer_pipeline.py", "snippet": "class VC(object):\n def __init__(self, tgt_sr, config):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n\n # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)\n def get_optimal_torch_device(self, index: int = 0) -> torch.device:\n # Get cuda device\n if torch.cuda.is_available():\n return torch.device(\n f\"cuda:{index % torch.cuda.device_count()}\"\n ) # Very fast\n elif torch.backends.mps.is_available():\n return torch.device(\"mps\")\n # Insert an else here to grab \"xla\" devices if available. TO DO later. Requires the torch_xla.core.xla_model library\n # Else wise return the \"cpu\" as a torch device,\n return torch.device(\"cpu\")\n\n # Fork Feature: Compute f0 with the crepe method\n def get_f0_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n p_len,\n hop_length=160, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. 
Lower hop lengths means more pitch accuracy but longer inference time.\n model=\"full\", # Either use crepe-tiny \"tiny\" or crepe \"full\". Default is full\n ):\n x = x.astype(\n np.float32\n ) # fixes the F.conv2D exception. We needed to convert double to float.\n x /= np.quantile(np.abs(x), 0.999)\n torch_device = self.get_optimal_torch_device()\n audio = torch.from_numpy(x).to(torch_device, copy=True)\n audio = torch.unsqueeze(audio, dim=0)\n if audio.ndim == 2 and audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True).detach()\n audio = audio.detach()\n print(\"Initiating prediction with a crepe_hop_length of: \" + str(hop_length))\n pitch: Tensor = torchcrepe.predict(\n audio,\n self.sr,\n hop_length,\n f0_min,\n f0_max,\n model,\n batch_size=hop_length * 2,\n device=torch_device,\n pad=True,\n )\n p_len = p_len or x.shape[0] // hop_length\n # Resize the pitch for final f0\n source = np.array(pitch.squeeze(0).cpu().float().numpy())\n source[source < 0.001] = np.nan\n target = np.interp(\n np.arange(0, len(source) * p_len, len(source)) / p_len,\n np.arange(0, len(source)),\n source,\n )\n f0 = np.nan_to_num(target)\n return f0 # Resized f0\n\n def get_f0_official_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n model=\"full\",\n ):\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n return f0\n\n # Fork Feature: Compute pYIN f0 method\n def get_f0_pyin_computation(self, x, f0_min, f0_max):\n y, sr = librosa.load(\"saudio/Sidney.wav\", self.sr, mono=True)\n f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max)\n f0 = f0[1:] # Get rid of extra first frame\n return f0\n\n # Fork Feature: Acquire median hybrid f0 estimation calculation\n def get_f0_hybrid_computation(\n self,\n methods_str,\n input_audio_path,\n x,\n f0_min,\n f0_max,\n p_len,\n filter_radius,\n crepe_hop_length,\n time_step,\n ):\n # Get various f0 methods from input to use in the computation stack\n s = methods_str\n s = s.split(\"hybrid\")[1]\n s = s.replace(\"[\", \"\").replace(\"]\", \"\")\n methods = s.split(\"+\")\n f0_computation_stack = []\n\n print(\"Calculating f0 pitch estimations for methods: %s\" % str(methods))\n x = x.astype(np.float32)\n x /= np.quantile(np.abs(x), 0.999)\n # Get f0 calculations for all methods specified\n for method in methods:\n f0 = None\n if method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)\n f0 = f0[1:] # Get rid of extra first frame\n elif method == \"crepe-tiny\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, \"tiny\")\n f0 = f0[1:] # Get rid of extra first frame\n elif method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length\n )\n elif method 
== \"mangio-crepe-tiny\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length, \"tiny\"\n )\n elif method == \"harvest\":\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n f0 = f0[1:] # Get rid of first frame.\n elif method == \"dio\": # Potentially buggy?\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n f0 = f0[1:]\n # elif method == \"pyin\": Not Working just yet\n # f0 = self.get_f0_pyin_computation(x, f0_min, f0_max)\n # Push method to the stack\n f0_computation_stack.append(f0)\n\n for fc in f0_computation_stack:\n print(len(fc))\n\n print(\"Calculating hybrid median f0 from the stack of: %s\" % str(methods))\n f0_median_hybrid = None\n if len(f0_computation_stack) == 1:\n f0_median_hybrid = f0_computation_stack[0]\n else:\n f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)\n return f0_median_hybrid\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n crepe_hop_length,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"dio\": # Potentially Buggy?\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)\n elif f0_method == \"crepe-tiny\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, \"tiny\")\n elif f0_method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length\n )\n elif f0_method == \"mangio-crepe-tiny\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length, \"tiny\"\n )\n elif f0_method == \"rmvpe\":\n if hasattr(self, \"model_rmvpe\") == False:\n from rmvpe import RMVPE\n\n self.model_rmvpe = RMVPE(\n './models/rmvpe.pt', is_half=self.is_half, device=self.device\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n elif \"hybrid\" in f0_method:\n # Perform hybrid median pitch estimation\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = self.get_f0_hybrid_computation(\n f0_method,\n input_audio_path,\n x,\n f0_min,\n f0_max,\n p_len,\n filter_radius,\n crepe_hop_length,\n time_step,\n )\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = 
np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int_)\n\n return f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch != None and pitchf != None:\n feats0 = feats.clone()\n if (\n isinstance(index, type(None)) == False\n and isinstance(big_npy, type(None)) == False\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch != None and pitchf != None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch != None and pitchf != None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch != None and pitchf != None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n if pitch != None and pitchf != None:\n audio1 = (\n (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])\n .data.cpu()\n .float()\n .numpy()\n )\n else:\n audio1 = (\n (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()\n )\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[0] += t1 - t0\n times[2] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n # 
file_big_npy,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n crepe_hop_length,\n f0_file=None,\n ):\n if (\n file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index) == True\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += audio_pad[i : i - self.window]\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n np.abs(audio_sum[t - self.t_query : t + self.t_query])\n == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\") == True:\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n crepe_hop_length,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if self.device == \"mps\":\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[1] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if resample_sr >= 16000 and tgt_sr != resample_sr:\n audio_opt = librosa.resample(\n 
audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt" } ]
from multiprocessing import cpu_count from pathlib import Path from fairseq import checkpoint_utils from scipy.io import wavfile from infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from vc_infer_pipeline import VC import torch import librosa import numpy as np
11,057
class Config: def __init__(self, device, is_half): self.device = device self.is_half = is_half self.n_cpu = 0 self.gpu_name = None self.gpu_mem = None self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() def device_config(self) -> tuple: if torch.cuda.is_available(): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) if ( ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) or "P40" in self.gpu_name.upper() or "1060" in self.gpu_name or "1070" in self.gpu_name or "1080" in self.gpu_name ): print("16 series/10 series P40 forced single precision") self.is_half = False else: self.gpu_name = None self.gpu_mem = int( torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4 ) if self.gpu_mem <= 2: print('Not enough VRAM to load models (Probably)') self.device = 'cpu' elif torch.backends.mps.is_available(): print("No supported N-card found, use MPS for inference") self.device = "mps" else: print("No supported N-card found, use CPU for inference") self.device = "cpu" if self.n_cpu == 0: self.n_cpu = cpu_count() if self.is_half: # 6G memory config x_pad = 3 x_query = 10 x_center = 60 x_max = 65 else: # 5G memory config x_pad = 1 x_query = 6 x_center = 38 x_max = 41 if self.gpu_mem != None and self.gpu_mem <= 4: x_pad = 1 x_query = 5 x_center = 30 x_max = 32 return x_pad, x_query, x_center, x_max def load_hubert(device, is_half, model_path): models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) hubert = models[0] hubert = hubert.to(device) if is_half: hubert = hubert.half() else: hubert = hubert.float() hubert.eval() return hubert def get_vc(device, is_half, config, model_path): cpt = torch.load(model_path, map_location='cpu') if "config" not in cpt or "weight" not in cpt: raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) else:
class Config: def __init__(self, device, is_half): self.device = device self.is_half = is_half self.n_cpu = 0 self.gpu_name = None self.gpu_mem = None self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() def device_config(self) -> tuple: if torch.cuda.is_available(): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) if ( ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) or "P40" in self.gpu_name.upper() or "1060" in self.gpu_name or "1070" in self.gpu_name or "1080" in self.gpu_name ): print("16 series/10 series P40 forced single precision") self.is_half = False else: self.gpu_name = None self.gpu_mem = int( torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4 ) if self.gpu_mem <= 2: print('Not enough VRAM to load models (Probably)') self.device = 'cpu' elif torch.backends.mps.is_available(): print("No supported N-card found, use MPS for inference") self.device = "mps" else: print("No supported N-card found, use CPU for inference") self.device = "cpu" if self.n_cpu == 0: self.n_cpu = cpu_count() if self.is_half: # 6G memory config x_pad = 3 x_query = 10 x_center = 60 x_max = 65 else: # 5G memory config x_pad = 1 x_query = 6 x_center = 38 x_max = 41 if self.gpu_mem != None and self.gpu_mem <= 4: x_pad = 1 x_query = 5 x_center = 30 x_max = 32 return x_pad, x_query, x_center, x_max def load_hubert(device, is_half, model_path): models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) hubert = models[0] hubert = hubert.to(device) if is_half: hubert = hubert.half() else: hubert = hubert.float() hubert.eval() return hubert def get_vc(device, is_half, config, model_path): cpt = torch.load(model_path, map_location='cpu') if "config" not in cpt or "weight" not in cpt: raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) else:
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
1
2023-11-30 08:47:28+00:00
16k
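The VC.get_f0 snippet in the record above maps the estimated pitch track from Hz to mel and quantises it into integer bins 1..255 before synthesis. Below is a minimal standalone restatement of just that quantisation step; the constants are copied from the snippet, while the function name f0_to_coarse is only a label chosen here.

import numpy as np

F0_MIN, F0_MAX = 50.0, 1100.0
F0_MEL_MIN = 1127 * np.log(1 + F0_MIN / 700)
F0_MEL_MAX = 1127 * np.log(1 + F0_MAX / 700)

def f0_to_coarse(f0: np.ndarray) -> np.ndarray:
    f0_mel = 1127 * np.log(1 + f0 / 700)   # Hz -> mel
    voiced = f0_mel > 0
    f0_mel[voiced] = (f0_mel[voiced] - F0_MEL_MIN) * 254 / (F0_MEL_MAX - F0_MEL_MIN) + 1
    f0_mel = np.clip(f0_mel, 1, 255)       # unvoiced frames (0 Hz) collapse to bin 1
    return np.rint(f0_mel).astype(np.int_)

if __name__ == "__main__":
    f0 = np.array([0.0, 100.0, 440.0, 1000.0])
    print(f0_to_coarse(f0))                # integer bins; 0 Hz maps to 1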
ubc-vision/nf-soft-mining
examples/utils.py
[ { "identifier": "OccGridEstimator", "path": "nerfacc/estimators/occ_grid.py", "snippet": "class OccGridEstimator(AbstractEstimator):\n \"\"\"Occupancy grid transmittance estimator for spatial skipping.\n\n References: \"Instant Neural Graphics Primitives.\"\n\n Args:\n roi_aabb: The axis-aligned bounding box of the region of interest. Useful for mapping\n the 3D space to the grid.\n resolution: The resolution of the grid. If an integer is given, the grid is assumed to\n be a cube. Otherwise, a list or a tensor of shape (3,) is expected. Default: 128.\n levels: The number of levels of the grid. Default: 1.\n \"\"\"\n\n DIM: int = 3\n\n def __init__(\n self,\n roi_aabb: Union[List[int], Tensor],\n resolution: Union[int, List[int], Tensor] = 128,\n levels: int = 1,\n **kwargs,\n ) -> None:\n super().__init__()\n\n if \"contraction_type\" in kwargs:\n raise ValueError(\n \"`contraction_type` is not supported anymore for nerfacc >= 0.4.0.\"\n )\n\n # check the resolution is legal\n if isinstance(resolution, int):\n resolution = [resolution] * self.DIM\n if isinstance(resolution, (list, tuple)):\n resolution = torch.tensor(resolution, dtype=torch.int32)\n assert isinstance(resolution, Tensor), f\"Invalid type: {resolution}!\"\n assert resolution.shape[0] == self.DIM, f\"Invalid shape: {resolution}!\"\n\n # check the roi_aabb is legal\n if isinstance(roi_aabb, (list, tuple)):\n roi_aabb = torch.tensor(roi_aabb, dtype=torch.float32)\n assert isinstance(roi_aabb, Tensor), f\"Invalid type: {roi_aabb}!\"\n assert roi_aabb.shape[0] == self.DIM * 2, f\"Invalid shape: {roi_aabb}!\"\n\n # multiple levels of aabbs\n aabbs = torch.stack(\n [_enlarge_aabb(roi_aabb, 2**i) for i in range(levels)], dim=0\n )\n\n # total number of voxels\n self.cells_per_lvl = int(resolution.prod().item())\n self.levels = levels\n\n # Buffers\n self.register_buffer(\"resolution\", resolution) # [3]\n self.register_buffer(\"aabbs\", aabbs) # [n_aabbs, 6]\n self.register_buffer(\n \"occs\", torch.zeros(self.levels * self.cells_per_lvl)\n )\n self.register_buffer(\n \"binaries\",\n torch.zeros([levels] + resolution.tolist(), dtype=torch.bool),\n )\n\n # Grid coords & indices\n grid_coords = _meshgrid3d(resolution).reshape(\n self.cells_per_lvl, self.DIM\n )\n self.register_buffer(\"grid_coords\", grid_coords, persistent=False)\n grid_indices = torch.arange(self.cells_per_lvl)\n self.register_buffer(\"grid_indices\", grid_indices, persistent=False)\n\n @torch.no_grad()\n def sampling(\n self,\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # sigma/alpha function for skipping invisible space\n sigma_fn: Optional[Callable] = None,\n alpha_fn: Optional[Callable] = None,\n near_plane: float = 0.0,\n far_plane: float = 1e10,\n t_min: Optional[Tensor] = None, # [n_rays]\n t_max: Optional[Tensor] = None, # [n_rays]\n # rendering options\n render_step_size: float = 1e-3,\n early_stop_eps: float = 1e-4,\n alpha_thre: float = 0.0,\n stratified: bool = False,\n cone_angle: float = 0.0,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Sampling with spatial skipping.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: Ray origins of shape (n_rays, 3).\n rays_d: Normalized ray directions of shape (n_rays, 3).\n sigma_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `sigma_fn`. 
It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation density values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n alpha_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `alpha_fn`. It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation opacity values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n near_plane: Optional. Near plane distance. Default: 0.0.\n far_plane: Optional. Far plane distance. Default: 1e10.\n t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).\n If profided, the marching will start from maximum of t_min and near_plane.\n t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).\n If profided, the marching will stop by minimum of t_max and far_plane.\n render_step_size: Step size for marching. Default: 1e-3.\n early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.\n alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.\n stratified: Whether to use stratified sampling. Default: False.\n cone_angle: Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n\n Returns:\n A tuple of {LongTensor, Tensor, Tensor}:\n\n - **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).\n - **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).\n - **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).\n\n Examples:\n\n .. code-block:: python\n\n >>> ray_indices, t_starts, t_ends = grid.sampling(\n >>> rays_o, rays_d, render_step_size=1e-3)\n >>> t_mid = (t_starts + t_ends) / 2.0\n >>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]\n\n \"\"\"\n\n near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)\n far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)\n\n if t_min is not None:\n near_planes = torch.clamp(near_planes, min=t_min)\n if t_max is not None:\n far_planes = torch.clamp(far_planes, max=t_max)\n\n if stratified:\n near_planes += torch.rand_like(near_planes) * render_step_size\n intervals, samples, _ = traverse_grids(\n rays_o,\n rays_d,\n self.binaries,\n self.aabbs,\n near_planes=near_planes,\n far_planes=far_planes,\n step_size=render_step_size,\n cone_angle=cone_angle,\n )\n t_starts = intervals.vals[intervals.is_left]\n t_ends = intervals.vals[intervals.is_right]\n ray_indices = samples.ray_indices\n packed_info = samples.packed_info\n\n # skip invisible space\n if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (\n sigma_fn is not None or alpha_fn is not None\n ):\n alpha_thre = min(alpha_thre, self.occs.mean().item())\n\n # Compute visibility of the samples, and filter out invisible samples\n if sigma_fn is not None:\n if t_starts.shape[0] != 0:\n sigmas = sigma_fn(t_starts, t_ends, ray_indices)\n else:\n sigmas = torch.empty((0,), device=t_starts.device)\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! 
Got {}\".format(sigmas.shape)\n masks = render_visibility_from_density(\n t_starts=t_starts,\n t_ends=t_ends,\n sigmas=sigmas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n elif alpha_fn is not None:\n if t_starts.shape[0] != 0:\n alphas = alpha_fn(t_starts, t_ends, ray_indices)\n else:\n alphas = torch.empty((0,), device=t_starts.device)\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n masks = render_visibility_from_alpha(\n alphas=alphas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n ray_indices, t_starts, t_ends = (\n ray_indices[masks],\n t_starts[masks],\n t_ends[masks],\n )\n return ray_indices, t_starts, t_ends\n\n @torch.no_grad()\n def update_every_n_steps(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 1e-2,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n n: int = 16,\n ) -> None:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n step: Current training step.\n occ_eval_fn: A function that takes in sample locations :math:`(N, 3)` and\n returns the occupancy values :math:`(N, 1)` at those locations.\n occ_thre: Threshold used to binarize the occupancy grid. Default: 1e-2.\n ema_decay: The decay rate for EMA updates. Default: 0.95.\n warmup_steps: Sample all cells during the warmup stage. After the warmup\n stage we change the sampling strategy to 1/4 uniformly sampled cells\n together with 1/4 occupied cells. Default: 256.\n n: Update the grid every n steps. Default: 16.\n \"\"\"\n if not self.training:\n raise RuntimeError(\n \"You should only call this function only during training. \"\n \"Please call _update() directly if you want to update the \"\n \"field during inference.\"\n )\n if step % n == 0 and self.training:\n self._update(\n step=step,\n occ_eval_fn=occ_eval_fn,\n occ_thre=occ_thre,\n ema_decay=ema_decay,\n warmup_steps=warmup_steps,\n )\n\n # adapted from https://github.com/kwea123/ngp_pl/blob/master/models/networks.py\n @torch.no_grad()\n def mark_invisible_cells(\n self,\n K: Tensor,\n c2w: Tensor,\n width: int,\n height: int,\n near_plane: float = 0.0,\n chunk: int = 32**3,\n ) -> None:\n \"\"\"Mark the cells that aren't covered by the cameras with density -1.\n Should only be executed once before training starts.\n\n Args:\n K: Camera intrinsics of shape (N, 3, 3) or (1, 3, 3).\n c2w: Camera to world poses of shape (N, 3, 4) or (N, 4, 4).\n width: Image width in pixels\n height: Image height in pixels\n near_plane: Near plane distance\n chunk: The chunk size to split the cells (to avoid OOM)\n \"\"\"\n assert K.dim() == 3 and K.shape[1:] == (3, 3)\n assert c2w.dim() == 3 and (\n c2w.shape[1:] == (3, 4) or c2w.shape[1:] == (4, 4)\n )\n assert K.shape[0] == c2w.shape[0] or K.shape[0] == 1\n\n N_cams = c2w.shape[0]\n w2c_R = c2w[:, :3, :3].transpose(2, 1) # (N_cams, 3, 3)\n w2c_T = -w2c_R @ c2w[:, :3, 3:] # (N_cams, 3, 1)\n\n lvl_indices = self._get_all_cells()\n for lvl, indices in enumerate(lvl_indices):\n grid_coords = self.grid_coords[indices]\n\n for i in range(0, len(indices), chunk):\n x = grid_coords[i : i + chunk] / (self.resolution - 1)\n indices_chunk = indices[i : i + chunk]\n # voxel coordinates [0, 1]^3 -> world\n xyzs_w = (\n self.aabbs[lvl, :3]\n + x * (self.aabbs[lvl, 3:] - self.aabbs[lvl, :3])\n ).T\n xyzs_c = w2c_R @ xyzs_w + w2c_T # (N_cams, 3, chunk)\n uvd = K @ xyzs_c # (N_cams, 3, chunk)\n uv = uvd[:, :2] / uvd[:, 2:] # (N_cams, 2, chunk)\n 
in_image = (\n (uvd[:, 2] >= 0)\n & (uv[:, 0] >= 0)\n & (uv[:, 0] < width)\n & (uv[:, 1] >= 0)\n & (uv[:, 1] < height)\n )\n covered_by_cam = (\n uvd[:, 2] >= near_plane\n ) & in_image # (N_cams, chunk)\n # if the cell is visible by at least one camera\n count = covered_by_cam.sum(0) / N_cams\n\n too_near_to_cam = (\n uvd[:, 2] < near_plane\n ) & in_image # (N, chunk)\n # if the cell is too close (in front) to any camera\n too_near_to_any_cam = too_near_to_cam.any(0)\n # a valid cell should be visible by at least one camera and not too close to any camera\n valid_mask = (count > 0) & (~too_near_to_any_cam)\n\n cell_ids_base = lvl * self.cells_per_lvl\n self.occs[cell_ids_base + indices_chunk] = torch.where(\n valid_mask, 0.0, -1.0\n )\n\n @torch.no_grad()\n def _get_all_cells(self) -> List[Tensor]:\n \"\"\"Returns all cells of the grid.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + self.grid_indices\n indices = self.grid_indices[self.occs[cell_ids] >= 0.0]\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _sample_uniform_and_occupied_cells(self, n: int) -> List[Tensor]:\n \"\"\"Samples both n uniform and occupied cells.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n uniform_indices = torch.randint(\n self.cells_per_lvl, (n,), device=self.device\n )\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + uniform_indices\n uniform_indices = uniform_indices[self.occs[cell_ids] >= 0.0]\n occupied_indices = torch.nonzero(self.binaries[lvl].flatten())[:, 0]\n if n < len(occupied_indices):\n selector = torch.randint(\n len(occupied_indices), (n,), device=self.device\n )\n occupied_indices = occupied_indices[selector]\n indices = torch.cat([uniform_indices, occupied_indices], dim=0)\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _update(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 0.01,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n ) -> None:\n \"\"\"Update the occ field in the EMA way.\"\"\"\n # sample cells\n if step < warmup_steps:\n lvl_indices = self._get_all_cells()\n else:\n N = self.cells_per_lvl // 4\n lvl_indices = self._sample_uniform_and_occupied_cells(N)\n\n for lvl, indices in enumerate(lvl_indices):\n # infer occupancy: density * step_size\n grid_coords = self.grid_coords[indices]\n x = (\n grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)\n ) / self.resolution\n # voxel coordinates [0, 1]^3 -> world\n x = self.aabbs[lvl, :3] + x * (\n self.aabbs[lvl, 3:] - self.aabbs[lvl, :3]\n )\n occ = occ_eval_fn(x).squeeze(-1)\n # ema update\n cell_ids = lvl * self.cells_per_lvl + indices\n self.occs[cell_ids] = torch.maximum(\n self.occs[cell_ids] * ema_decay, occ\n )\n # suppose to use scatter max but emperically it is almost the same.\n # self.occs, _ = scatter_max(\n # occ, indices, dim=0, out=self.occs * ema_decay\n # )\n thre = torch.clamp(self.occs[self.occs >= 0].mean(), max=occ_thre)\n self.binaries = (self.occs > thre).view(self.binaries.shape)" }, { "identifier": "PropNetEstimator", "path": "nerfacc/estimators/prop_net.py", "snippet": "class PropNetEstimator(AbstractEstimator):\n \"\"\"Proposal network transmittance estimator.\n\n References: \"Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields.\"\n\n Args:\n optimizer: The optimizer to use for the proposal networks.\n scheduler: The 
learning rate scheduler to use for the proposal networks.\n \"\"\"\n\n def __init__(\n self,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n ) -> None:\n super().__init__()\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.prop_cache: List = []\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\"uniform\", \"lindisp\"] = \"lindisp\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Note:\n When `requires_grad` is `True`, the gradients are allowed to flow\n through the proposal networks, and the outputs of the proposal\n networks are cached to update them later when calling `update_every_n_steps()`\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for level_fn, level_samples in zip(prop_sigma_fns, prop_samples):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(\n t_starts, t_ends, sigmas\n )\n cdfs = 1.0 - torch.cat(\n [trans, torch.zeros_like(trans[:, :1])], dim=-1\n )\n if requires_grad:\n self.prop_cache.append((intervals, cdfs))\n\n intervals, _ = importance_sampling(\n intervals, cdfs, num_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n if requires_grad:\n self.prop_cache.append((intervals, None))\n\n return t_starts, t_ends\n\n @torch.enable_grad()\n def compute_loss(self, trans: Tensor, loss_scaler: float = 1.0) -> Tensor:\n \"\"\"Compute the loss for the proposal networks.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n loss_scaler: The loss scaler. 
Default to 1.0.\n\n Returns:\n The loss for the proposal networks.\n \"\"\"\n if len(self.prop_cache) == 0:\n return torch.zeros((), device=self.device)\n\n intervals, _ = self.prop_cache.pop()\n # get cdfs at all edges of intervals\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[:, :1])], dim=-1)\n cdfs = cdfs.detach()\n\n loss = 0.0\n while self.prop_cache:\n prop_intervals, prop_cdfs = self.prop_cache.pop()\n loss += _pdf_loss(intervals, cdfs, prop_intervals, prop_cdfs).mean()\n return loss * loss_scaler\n\n @torch.enable_grad()\n def update_every_n_steps(\n self,\n trans: Tensor,\n requires_grad: bool = False,\n loss_scaler: float = 1.0,\n ) -> float:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n loss_scaler: The loss scaler to use. Default to 1.0.\n\n Returns:\n The loss of the proposal networks for logging (a float scalar).\n \"\"\"\n if requires_grad:\n return self._update(trans=trans, loss_scaler=loss_scaler)\n else:\n if self.scheduler is not None:\n self.scheduler.step()\n return 0.0\n\n @torch.enable_grad()\n def _update(self, trans: Tensor, loss_scaler: float = 1.0) -> float:\n assert len(self.prop_cache) > 0\n assert self.optimizer is not None, \"No optimizer is provided.\"\n\n loss = self.compute_loss(trans, loss_scaler)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n return loss.item()" }, { "identifier": "ray_aabb_intersect", "path": "nerfacc/grid.py", "snippet": "@torch.no_grad()\ndef ray_aabb_intersect(\n rays_o: Tensor,\n rays_d: Tensor,\n aabbs: Tensor,\n near_plane: float = -float(\"inf\"),\n far_plane: float = float(\"inf\"),\n miss_value: float = float(\"inf\"),\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Ray-AABB intersection.\n\n Args:\n rays_o: (n_rays, 3) Ray origins.\n rays_d: (n_rays, 3) Normalized ray directions.\n aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.\n near_plane: Optional. Near plane. Default to -infinity.\n far_plane: Optional. Far plane. Default to infinity.\n miss_value: Optional. 
Value to use for tmin and tmax when there is no intersection.\n Default to infinity.\n\n Returns:\n A tuple of {Tensor, Tensor, BoolTensor}:\n\n - **t_mins**: (n_rays, m) tmin for each ray-AABB pair.\n - **t_maxs**: (n_rays, m) tmax for each ray-AABB pair.\n - **hits**: (n_rays, m) whether each ray-AABB pair intersects.\n \"\"\"\n assert rays_o.ndim == 2 and rays_o.shape[-1] == 3\n assert rays_d.ndim == 2 and rays_d.shape[-1] == 3\n assert aabbs.ndim == 2 and aabbs.shape[-1] == 6\n t_mins, t_maxs, hits = _C.ray_aabb_intersect(\n rays_o.contiguous(),\n rays_d.contiguous(),\n aabbs.contiguous(),\n near_plane,\n far_plane,\n miss_value,\n )\n return t_mins, t_maxs, hits" }, { "identifier": "traverse_grids", "path": "nerfacc/grid.py", "snippet": "@torch.no_grad()\ndef traverse_grids(\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # grids\n binaries: Tensor, # [m, resx, resy, resz]\n aabbs: Tensor, # [m, 6]\n # options\n near_planes: Optional[Tensor] = None, # [n_rays]\n far_planes: Optional[Tensor] = None, # [n_rays]\n step_size: Optional[float] = 1e-3,\n cone_angle: Optional[float] = 0.0,\n traverse_steps_limit: Optional[int] = None,\n over_allocate: Optional[bool] = False,\n rays_mask: Optional[Tensor] = None, # [n_rays]\n # pre-compute intersections\n t_sorted: Optional[Tensor] = None, # [n_rays, n_grids * 2]\n t_indices: Optional[Tensor] = None, # [n_rays, n_grids * 2]\n hits: Optional[Tensor] = None, # [n_rays, n_grids]\n) -> Tuple[RayIntervals, RaySamples, Tensor]:\n \"\"\"Ray Traversal within Multiple Grids.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: (n_rays, 3) Ray origins.\n rays_d: (n_rays, 3) Normalized ray directions.\n binary_grids: (m, resx, resy, resz) Multiple binary grids with the same resolution.\n aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.\n near_planes: Optional. (n_rays,) Near planes for the traversal to start. Default to 0.\n far_planes: Optional. (n_rays,) Far planes for the traversal to end. Default to infinity.\n step_size: Optional. Step size for ray traversal. Default to 1e-3.\n cone_angle: Optional. Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n traverse_steps_limit: Optional. Maximum number of samples per ray.\n over_allocate: Optional. Whether to over-allocate the memory for the outputs.\n rays_mask: Optional. (n_rays,) Skip some rays if given.\n t_sorted: Optional. (n_rays, n_grids * 2) Pre-computed sorted t values for each ray-grid pair. Default to None.\n t_indices: Optional. (n_rays, n_grids * 2) Pre-computed sorted t indices for each ray-grid pair. Default to None.\n hits: Optional. (n_rays, n_grids) Pre-computed hit flags for each ray-grid pair. 
Default to None.\n\n Returns:\n A :class:`RayIntervals` object containing the intervals of the ray traversal, and\n a :class:`RaySamples` object containing the samples within each interval.\n t :class:`Tensor` of shape (n_rays,) containing the terminated t values for each ray.\n \"\"\"\n\n if near_planes is None:\n near_planes = torch.zeros_like(rays_o[:, 0])\n if far_planes is None:\n far_planes = torch.full_like(rays_o[:, 0], float(\"inf\"))\n\n if rays_mask is None:\n rays_mask = torch.ones_like(rays_o[:, 0], dtype=torch.bool)\n if traverse_steps_limit is None:\n traverse_steps_limit = -1\n if over_allocate:\n assert (\n traverse_steps_limit > 0\n ), \"traverse_steps_limit must be set if over_allocate is True.\"\n\n if t_sorted is None or t_indices is None or hits is None:\n # Compute ray aabb intersection for all levels of grid. [n_rays, m]\n t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, aabbs)\n # Sort the t values for each ray. [n_rays, m]\n t_sorted, t_indices = torch.sort(\n torch.cat([t_mins, t_maxs], dim=-1), dim=-1\n )\n\n # Traverse the grids.\n intervals, samples, termination_planes = _C.traverse_grids(\n # rays\n rays_o.contiguous(), # [n_rays, 3]\n rays_d.contiguous(), # [n_rays, 3]\n rays_mask.contiguous(), # [n_rays]\n # grids\n binaries.contiguous(), # [m, resx, resy, resz]\n aabbs.contiguous(), # [m, 6]\n # intersections\n t_sorted.contiguous(), # [n_rays, m * 2]\n t_indices.contiguous(), # [n_rays, m * 2]\n hits.contiguous(), # [n_rays, m]\n # options\n near_planes.contiguous(), # [n_rays]\n far_planes.contiguous(), # [n_rays]\n step_size,\n cone_angle,\n True,\n True,\n True,\n traverse_steps_limit,\n over_allocate,\n )\n return (\n RayIntervals._from_cpp(intervals),\n RaySamples._from_cpp(samples),\n termination_planes,\n )" }, { "identifier": "accumulate_along_rays_", "path": "nerfacc/volrend.py", "snippet": "def accumulate_along_rays_(\n weights: Tensor,\n values: Optional[Tensor] = None,\n ray_indices: Optional[Tensor] = None,\n outputs: Optional[Tensor] = None,\n) -> None:\n \"\"\"Accumulate volumetric values along the ray.\n\n Inplace version of :func:`accumulate_along_rays`.\n \"\"\"\n if values is None:\n src = weights[..., None]\n else:\n assert values.dim() == weights.dim() + 1\n assert weights.shape == values.shape[:-1]\n src = weights[..., None] * values\n if ray_indices is not None:\n assert weights.dim() == 1, \"weights must be flattened\"\n assert (\n outputs.dim() == 2 and outputs.shape[-1] == src.shape[-1]\n ), \"outputs must be of shape (n_rays, D)\"\n outputs.index_add_(0, ray_indices, src)\n else:\n outputs.add_(src.sum(dim=-2))" }, { "identifier": "render_weight_from_density", "path": "nerfacc/volrend.py", "snippet": "def render_weight_from_density(\n t_starts: Tensor,\n t_ends: Tensor,\n sigmas: Tensor,\n packed_info: Optional[Tensor] = None,\n ray_indices: Optional[Tensor] = None,\n n_rays: Optional[int] = None,\n prefix_trans: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Compute rendering weights :math:`w_i` from density :math:`\\\\sigma_i` and interval :math:`\\\\delta_i`.\n\n .. math::\n w_i = T_i(1 - exp(-\\\\sigma_i\\delta_i)), \\\\quad\\\\textrm{where}\\\\quad T_i = exp(-\\\\sum_{j=1}^{i-1}\\\\sigma_j\\delta_j)\n\n This function supports both batched and flattened input tensor. For flattened input tensor, either\n (`packed_info`) or (`ray_indices` and `n_rays`) should be provided.\n\n Args:\n t_starts: The start time of the samples. 
Tensor with shape (all_samples,) or (n_rays, n_samples).\n t_ends: The end time of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).\n sigmas: The density values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).\n packed_info: A tensor of shape (n_rays, 2) that specifies the start and count\n of each chunk in the flattened samples, with in total n_rays chunks.\n Useful for flattened input.\n ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).\n n_rays: Number of rays. Only useful when `ray_indices` is provided.\n prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).\n\n Returns:\n The rendering weights, transmittance and opacities, both with the same shape as `sigmas`.\n\n Examples:\n\n .. code-block:: python\n\n >>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device=\"cuda\")\n >>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device=\"cuda\")\n >>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device=\"cuda\")\n >>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device=\"cuda\")\n >>> weights, transmittance, alphas = render_weight_from_density(\n >>> t_starts, t_ends, sigmas, ray_indices=ray_indices)\n weights: [0.33, 0.37, 0.03, 0.55, 0.04, 0.00, 0.59]\n transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00]\n alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59]\n\n \"\"\"\n trans, alphas = render_transmittance_from_density(\n t_starts, t_ends, sigmas, packed_info, ray_indices, n_rays, prefix_trans\n )\n weights = trans * alphas\n return weights, trans, alphas" }, { "identifier": "rendering", "path": "nerfacc/volrend.py", "snippet": "def rendering(\n # ray marching results\n t_starts: Tensor,\n t_ends: Tensor,\n ray_indices: Optional[Tensor] = None,\n n_rays: Optional[int] = None,\n # radiance field\n rgb_sigma_fn: Optional[Callable] = None,\n rgb_alpha_fn: Optional[Callable] = None,\n # rendering options\n render_bkgd: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor, Dict]:\n \"\"\"Render the rays through the radience field defined by `rgb_sigma_fn`.\n\n This function is differentiable to the outputs of `rgb_sigma_fn` so it can\n be used for gradient-based optimization. It supports both batched and flattened input tensor.\n For flattened input tensor, both `ray_indices` and `n_rays` should be provided.\n\n\n Note:\n Either `rgb_sigma_fn` or `rgb_alpha_fn` should be provided.\n\n Warning:\n This function is not differentiable to `t_starts`, `t_ends` and `ray_indices`.\n\n Args:\n t_starts: Per-sample start distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n t_ends: Per-sample end distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).\n n_rays: Number of rays. Only useful when `ray_indices` is provided.\n rgb_sigma_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and density\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n rgb_alpha_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and opacity\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n render_bkgd: Background color. 
Tensor with shape (3,).\n\n Returns:\n Ray colors (n_rays, 3), opacities (n_rays, 1), depths (n_rays, 1) and a dict\n containing extra intermediate results (e.g., \"weights\", \"trans\", \"alphas\")\n\n Examples:\n\n .. code-block:: python\n\n >>> t_starts = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.3], device=\"cuda:0\")\n >>> t_ends = torch.tensor([0.2, 0.3, 0.2, 0.3, 0.4], device=\"cuda:0\")\n >>> ray_indices = torch.tensor([0, 0, 1, 1, 1], device=\"cuda:0\")\n >>> def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n >>> # This is a dummy function that returns random values.\n >>> rgbs = torch.rand((t_starts.shape[0], 3), device=\"cuda:0\")\n >>> sigmas = torch.rand((t_starts.shape[0],), device=\"cuda:0\")\n >>> return rgbs, sigmas\n >>> colors, opacities, depths, extras = rendering(\n >>> t_starts, t_ends, ray_indices, n_rays=2, rgb_sigma_fn=rgb_sigma_fn)\n >>> print(colors.shape, opacities.shape, depths.shape)\n torch.Size([2, 3]) torch.Size([2, 1]) torch.Size([2, 1])\n >>> extras.keys()\n dict_keys(['weights', 'alphas', 'trans'])\n\n \"\"\"\n if ray_indices is not None:\n assert (\n t_starts.shape == t_ends.shape == ray_indices.shape\n ), \"Since nerfacc 0.5.0, t_starts, t_ends and ray_indices must have the same shape (N,). \"\n\n if rgb_sigma_fn is None and rgb_alpha_fn is None:\n raise ValueError(\n \"At least one of `rgb_sigma_fn` and `rgb_alpha_fn` should be specified.\"\n )\n\n # Query sigma/alpha and color with gradients\n if rgb_sigma_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n sigmas = torch.empty((0,), device=t_starts.device)\n assert rgbs.shape[-1] == 3, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! Got {}\".format(sigmas.shape)\n # Rendering: compute weights.\n weights, trans, alphas = render_weight_from_density(\n t_starts,\n t_ends,\n sigmas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"alphas\": alphas,\n \"trans\": trans,\n \"sigmas\": sigmas,\n \"rgbs\": rgbs,\n }\n elif rgb_alpha_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, alphas = rgb_alpha_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n alphas = torch.empty((0,), device=t_starts.device)\n assert rgbs.shape[-1] == 3, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n # Rendering: compute weights.\n weights, trans = render_weight_from_alpha(\n alphas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"trans\": trans,\n \"rgbs\": rgbs,\n \"alphas\": alphas,\n }\n\n # Rendering: accumulate rgbs, opacities, and depths along the rays.\n colors = accumulate_along_rays(\n weights, values=rgbs, ray_indices=ray_indices, n_rays=n_rays\n )\n opacities = accumulate_along_rays(\n weights, values=None, ray_indices=ray_indices, n_rays=n_rays\n )\n depths = accumulate_along_rays(\n weights,\n values=(t_starts + t_ends)[..., None] / 2.0,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n depths = depths / opacities.clamp_min(torch.finfo(rgbs.dtype).eps)\n\n # Background composition.\n if render_bkgd is not None:\n colors = colors + render_bkgd * (1.0 - opacities)\n\n return colors, opacities, depths, extras" } ]
import random
import numpy as np
import torch
from typing import Optional, Sequence
from typing import Literal
from typing_extensions import Literal
from datasets.utils import Rays, namedtuple_map
from torch.utils.data._utils.collate import collate, default_collate_fn_map
from nerfacc.estimators.occ_grid import OccGridEstimator
from nerfacc.estimators.prop_net import PropNetEstimator
from nerfacc.grid import ray_aabb_intersect, traverse_grids
from nerfacc.volrend import (
    accumulate_along_rays_,
    render_weight_from_density,
    rendering,
)
11,006
""" Copyright (c) 2022 Ruilong Li, UC Berkeley. """ try: except ImportError: NERF_SYNTHETIC_SCENES = [ "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", ] MIPNERF360_UNBOUNDED_SCENES = [ "garden", "bicycle", "bonsai", "counter", "kitchen", "room", "stump", ] LLFF_NDC_SCENES = [ "fern", "flower", "fortress", "horns", "leaves", "orchids", "room_llff", "trex", ] def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def render_image_with_occgrid( # scene radiance_field: torch.nn.Module,
""" Copyright (c) 2022 Ruilong Li, UC Berkeley. """ try: except ImportError: NERF_SYNTHETIC_SCENES = [ "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", ] MIPNERF360_UNBOUNDED_SCENES = [ "garden", "bicycle", "bonsai", "counter", "kitchen", "room", "stump", ] LLFF_NDC_SCENES = [ "fern", "flower", "fortress", "horns", "leaves", "orchids", "room_llff", "trex", ] def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def render_image_with_occgrid( # scene radiance_field: torch.nn.Module,
estimator: OccGridEstimator,
0
2023-11-27 22:12:55+00:00
16k
facebookresearch/SOC-matching
main.py
[ { "identifier": "get_folder_name", "path": "SOC_matching/utils.py", "snippet": "def get_folder_name(cfg):\n folder_name = (\n cfg.method.algorithm\n + \"_\"\n + cfg.method.setting\n + \"_\"\n + str(cfg.method.lmbd)\n + \"_\"\n + str(cfg.method.T)\n + \"_\"\n + str(cfg.method.num_steps)\n + \"_\"\n + str(cfg.method.use_warm_start)\n + \"_\"\n + str(cfg.method.seed)\n + \"_\"\n + str(cfg.optim.batch_size)\n + \"_\"\n + str(cfg.optim.M_lr)\n + \"_\"\n + str(cfg.optim.nabla_V_lr)\n )\n return folder_name" }, { "identifier": "get_file_name", "path": "SOC_matching/utils.py", "snippet": "def get_file_name(folder_name, num_iterations=0, last=False):\n if last:\n return folder_name + \"/last.pkl\"\n file_name = str(num_iterations)\n print(f\"folder_name: {folder_name}\")\n return folder_name + \"/\" + file_name + \".pkl\"" }, { "identifier": "control_objective", "path": "SOC_matching/utils.py", "snippet": "def control_objective(\n sde, x0, ts, lmbd, batch_size, total_n_samples=65536, verbose=False\n):\n n_batches = int(total_n_samples // batch_size)\n effective_n_samples = n_batches * batch_size\n for k in range(n_batches):\n state0 = x0.repeat(batch_size, 1)\n (\n _,\n _,\n _,\n _,\n log_path_weight_deterministic,\n _,\n log_terminal_weight,\n _,\n ) = stochastic_trajectories(\n sde,\n state0,\n ts.to(state0),\n lmbd,\n verbose=verbose,\n )\n if k == 0:\n ctrl_losses = -lmbd * (log_path_weight_deterministic + log_terminal_weight)\n else:\n ctrl_loss = -lmbd * (log_path_weight_deterministic + log_terminal_weight)\n ctrl_losses = torch.cat((ctrl_losses, ctrl_loss), 0)\n if k % 32 == 31:\n print(f\"Batch {k+1}/{n_batches} done\")\n return torch.mean(ctrl_losses), torch.std(ctrl_losses) / np.sqrt(\n effective_n_samples - 1\n )" }, { "identifier": "save_results", "path": "SOC_matching/utils.py", "snippet": "def save_results(results, folder_name, file_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n with open(file_name, \"wb\") as f:\n pickle.dump(results, f)" }, { "identifier": "compute_EMA", "path": "SOC_matching/utils.py", "snippet": "def compute_EMA(value, EMA_value, EMA_coeff=0.01, itr=0):\n itr_avg = int(np.floor(1 / EMA_coeff))\n if itr == 0:\n return value\n elif itr <= itr_avg:\n return (value + itr * EMA_value) / (itr + 1)\n else:\n return EMA_coeff * value + (1 - EMA_coeff) * EMA_value" }, { "identifier": "normalization_constant", "path": "SOC_matching/utils.py", "snippet": "def normalization_constant(\n sde, x0, ts, cfg, n_batches_normalization=512, ground_truth_control=None\n):\n log_weights_list = []\n weights_list = []\n\n if ground_truth_control is not None:\n norm_sqd_diff_mean = 0\n for k in range(n_batches_normalization):\n (\n states,\n _,\n _,\n _,\n log_path_weight_deterministic,\n log_path_weight_stochastic,\n log_terminal_weight,\n controls,\n ) = stochastic_trajectories(\n sde,\n x0,\n ts.to(x0),\n cfg.method.lmbd,\n )\n log_weights = (\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n log_weights_list.append(log_weights)\n weights = torch.exp(\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n weights_list.append(weights)\n\n if ground_truth_control is not None:\n gt_controls = ground_truth_control(ts, states, t_is_tensor=True)[\n :-1, :, :\n ].detach()\n norm_sqd_diff = torch.sum(\n (gt_controls - controls) ** 2\n * weights.unsqueeze(0).unsqueeze(2)\n / (gt_controls.shape[0] * gt_controls.shape[1])\n )\n norm_sqd_diff_mean += norm_sqd_diff\n if k % 32 == 31:\n 
print(f\"Batch {k+1}/{n_batches_normalization} done\")\n if ground_truth_control is not None:\n norm_sqd_diff_mean = norm_sqd_diff_mean / n_batches_normalization\n else:\n norm_sqd_diff_mean = None\n\n log_weights = torch.stack(log_weights_list, dim=1)\n weights = torch.stack(weights_list, dim=1)\n\n print(\n f\"Average and std. dev. of log_weights for all batches: {torch.mean(log_weights)} {torch.std(log_weights)}\"\n )\n\n normalization_const = torch.mean(weights)\n normalization_const_std_error = torch.std(weights) / np.sqrt(\n weights.shape[0] * weights.shape[1] - 1\n )\n return normalization_const, normalization_const_std_error, norm_sqd_diff_mean" }, { "identifier": "SOC_Solver", "path": "SOC_matching/method.py", "snippet": "class SOC_Solver(nn.Module):\n noise_type = \"diagonal\"\n sde_type = \"ito\"\n\n def __init__(\n self,\n neural_sde,\n x0,\n ut,\n T=1.0,\n num_steps=100,\n lmbd=1.0,\n d=2,\n sigma=torch.eye(2),\n ):\n super().__init__()\n self.dim = neural_sde.dim\n self.neural_sde = neural_sde\n self.x0 = x0\n self.ut = ut\n self.T = T\n self.ts = torch.linspace(0, T, num_steps + 1).to(x0.device)\n self.num_steps = num_steps\n self.dt = T / num_steps\n self.lmbd = lmbd\n self.d = d\n self.y0 = torch.nn.Parameter(torch.randn(1, device=x0.device))\n self.sigma = sigma\n\n def control(self, t0, x0):\n x0 = x0.reshape(-1, self.dim)\n t0_expanded = t0.reshape(-1, 1).expand(x0.shape[0], 1)\n tx = torch.cat([t0_expanded, x0], dim=-1)\n nabla_V = self.neural_sde.nabla_V(tx)\n learned_control = -torch.einsum(\n \"ij,bj->bi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n return learned_control\n\n def control_objective(self, batch_size, total_n_samples=65536):\n n_batches = int(total_n_samples // batch_size)\n effective_n_samples = n_batches * batch_size\n for k in range(n_batches):\n state0 = self.x0.repeat(batch_size, 1)\n (\n states,\n _,\n _,\n _,\n log_path_weight_deterministic,\n _,\n log_terminal_weight,\n _,\n ) = utils.stochastic_trajectories(\n self.neural_sde,\n state0,\n self.ts.to(state0),\n self.lmbd,\n )\n if k == 0:\n ctrl_losses = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n trajectory = states\n else:\n ctrl_loss = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n ctrl_losses = torch.cat((ctrl_losses, ctrl_loss), 0)\n if k % 32 == 31:\n print(f\"Batch {k+1}/{n_batches} done\")\n return (\n torch.mean(ctrl_losses),\n torch.std(ctrl_losses) / np.sqrt(effective_n_samples - 1),\n trajectory,\n )\n\n def loss(\n self,\n batch_size,\n compute_L2_error=False,\n optimal_control=None,\n compute_control_objective=False,\n algorithm=\"SOCM_const_M\",\n add_weights=False,\n total_n_samples=65536,\n verbose=False,\n u_warm_start=None,\n use_warm_start=True,\n use_stopping_time=False,\n ):\n\n state0 = self.x0.repeat(batch_size, 1)\n d = state0.shape[1]\n detach = algorithm != \"rel_entropy\"\n (\n states,\n noises,\n stop_indicators,\n fractional_timesteps,\n log_path_weight_deterministic,\n log_path_weight_stochastic,\n log_terminal_weight,\n controls,\n ) = utils.stochastic_trajectories(\n self.neural_sde,\n state0,\n self.ts.to(state0),\n self.lmbd,\n detach=detach,\n )\n unsqueezed_stop_indicators = stop_indicators.unsqueeze(2)\n weight = torch.exp(\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n\n if algorithm == \"rel_entropy\":\n ctrl_losses = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n objective = torch.mean(ctrl_losses)\n weight = 
weight.detach()\n learned_control = controls.detach()\n else:\n ts_repeat = self.ts.unsqueeze(1).unsqueeze(2).repeat(1, states.shape[1], 1)\n tx = torch.cat([ts_repeat, states], dim=-1)\n tx_reshape = torch.reshape(tx, (-1, tx.shape[2]))\n\n # Evaluate nabla_V\n nabla_V = self.neural_sde.nabla_V(tx_reshape)\n nabla_V = torch.reshape(nabla_V, states.shape)\n\n if u_warm_start and use_warm_start:\n sigma_inverse_transpose = torch.transpose(\n torch.inverse(self.sigma), 0, 1\n )\n u_warm_start_eval = u_warm_start(self.ts, states).detach()\n nabla_V = nabla_V - torch.einsum(\n \"ij,abj->abi\", sigma_inverse_transpose, u_warm_start_eval\n )\n\n if algorithm == \"SOCM_const_M\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n least_squares_target_integrand_term_1 = (\n self.neural_sde.nabla_f(self.ts[0], states)\n )[:-1, :, :]\n least_squares_target_integrand_term_2 = -np.sqrt(self.lmbd) * torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n least_squares_target_integrand_term_3 = -torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n least_squares_target_terminal = self.neural_sde.nabla_g(states[-1, :, :])\n\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_1[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_2[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_3_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_3[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_3\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n\n cumulative_sum_least_squares_term_1 = torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n )\n cumulative_sum_least_squares_term_2 = torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n )\n cumulative_sum_least_squares_term_3 = torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n )\n least_squares_target = (\n cumulative_sum_least_squares_term_1\n + cumulative_sum_least_squares_term_2\n + cumulative_sum_least_squares_term_3\n + least_squares_target_terminal.unsqueeze(0)\n )\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), least_squares_target\n )\n\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM_exp\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n exp_factor = torch.exp(-self.gamma * self.ts)\n identity = torch.eye(d).to(self.x0.device)\n 
least_squares_target_integrand_term_1 = (\n exp_factor.unsqueeze(1).unsqueeze(2)\n * self.neural_sde.nabla_f(self.ts[0], states)\n )[:-1, :, :]\n least_squares_target_integrand_term_2 = exp_factor[:-1].unsqueeze(\n 1\n ).unsqueeze(2) * (\n -np.sqrt(self.lmbd)\n * torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :]\n + self.gamma * identity,\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n )\n least_squares_target_integrand_term_3 = exp_factor[:-1].unsqueeze(\n 1\n ).unsqueeze(2) * (\n -torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :]\n + self.gamma * identity,\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n )\n least_squares_target_terminal = torch.exp(\n -self.gamma * (self.T - self.ts)\n ).unsqueeze(1).unsqueeze(2) * self.neural_sde.nabla_g(\n states[-1, :, :]\n ).unsqueeze(\n 0\n )\n\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_1[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_2[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_3_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_3[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_3\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n\n inv_exp_factor = 1 / exp_factor\n cumsum_least_squares_term_1 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(least_squares_target_integrand_term_1_times_dt, dim=0)\n )\n cumsum_least_squares_term_2 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n )\n )\n cumsum_least_squares_term_3 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(least_squares_target_integrand_term_3_times_dt, dim=0)\n )\n\n least_squares_target = (\n cumsum_least_squares_term_1\n + cumsum_least_squares_term_2\n + cumsum_least_squares_term_3\n + least_squares_target_terminal\n )\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), least_squares_target\n )\n\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n identity = torch.eye(d).to(self.x0.device)\n\n if use_stopping_time:\n sum_M = lambda t, s, stopping_timestep_values: self.neural_sde.M(\n t, s, stopping_timestep_values\n ).sum(dim=0)\n\n derivative_M_0 = functorch.jacrev(sum_M, argnums=1)\n derivative_M = lambda t, s, stopping_timestep_values: torch.transpose(\n torch.transpose(\n torch.transpose(\n derivative_M_0(t, s, stopping_timestep_values), 2, 3\n ),\n 1,\n 2,\n ),\n 0,\n 
1,\n )\n\n M_evals = torch.zeros(len(self.ts), len(self.ts), batch_size, d, d).to(\n self.ts.device\n )\n derivative_M_evals = torch.zeros(\n len(self.ts), len(self.ts), batch_size, d, d\n ).to(self.ts.device)\n\n else:\n sum_M = lambda t, s: self.neural_sde.M(t, s).sum(dim=0)\n\n derivative_M_0 = functorch.jacrev(sum_M, argnums=1)\n derivative_M = lambda t, s: torch.transpose(\n torch.transpose(derivative_M_0(t, s), 1, 2), 0, 1\n )\n\n M_evals = torch.zeros(len(self.ts), len(self.ts), d, d).to(\n self.ts.device\n )\n derivative_M_evals = torch.zeros(len(self.ts), len(self.ts), d, d).to(\n self.ts.device\n )\n\n if use_stopping_time:\n stopping_function_output_int = (self.neural_sde.Phi(states) > 0).to(\n torch.int\n )\n stopping_timestep = (\n torch.sum(stopping_function_output_int, dim=0) - 1\n ) / (len(self.ts) - 1)\n stopping_timestep_vector = []\n\n s_vector = []\n t_vector = []\n for k, t in enumerate(self.ts):\n s_vector.append(\n torch.linspace(t, self.T, self.num_steps + 1 - k).to(self.ts.device)\n )\n t_vector.append(\n t * torch.ones(self.num_steps + 1 - k).to(self.ts.device)\n )\n if use_stopping_time:\n stopping_timestep_vector.append(\n stopping_timestep.unsqueeze(0).repeat(self.num_steps + 1 - k, 1)\n )\n s_vector = torch.cat(s_vector)\n t_vector = torch.cat(t_vector)\n if use_stopping_time:\n stopping_timestep_vector = torch.cat(stopping_timestep_vector, dim=0)\n M_evals_all = self.neural_sde.M(\n t_vector, s_vector, stopping_timestep_vector\n )\n derivative_M_evals_all = torch.nan_to_num(\n derivative_M(t_vector, s_vector, stopping_timestep_vector)\n )\n counter = 0\n for k, t in enumerate(self.ts):\n M_evals[k, k:, :, :, :] = M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :, :\n ]\n derivative_M_evals[k, k:, :, :, :] = derivative_M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :, :\n ]\n counter += self.num_steps + 1 - k\n else:\n M_evals_all = self.neural_sde.M(\n t_vector,\n s_vector,\n )\n derivative_M_evals_all = derivative_M(\n t_vector,\n s_vector,\n )\n counter = 0\n for k, t in enumerate(self.ts):\n M_evals[k, k:, :, :] = M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :\n ]\n derivative_M_evals[k, k:, :, :] = derivative_M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :\n ]\n counter += self.num_steps + 1 - k\n\n if use_stopping_time:\n least_squares_target_integrand_term_1 = torch.einsum(\n \"ijmkl,jml->ijmk\",\n M_evals,\n self.neural_sde.nabla_f(self.ts, states),\n )[:, :-1, :, :]\n else:\n least_squares_target_integrand_term_1 = torch.einsum(\n \"ijkl,jml->ijmk\",\n M_evals,\n self.neural_sde.nabla_f(self.ts, states),\n )[:, :-1, :, :]\n\n if use_stopping_time:\n M_nabla_b_term = (\n torch.einsum(\n \"ijmkl,jmln->ijmkn\",\n M_evals,\n self.neural_sde.nabla_b(self.ts, states),\n )\n - derivative_M_evals\n )\n least_squares_target_integrand_term_2 = -np.sqrt(\n self.lmbd\n ) * torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n else:\n M_nabla_b_term = torch.einsum(\n \"ijkl,jmln->ijmkn\",\n M_evals,\n self.neural_sde.nabla_b(self.ts, states),\n ) - derivative_M_evals.unsqueeze(2)\n least_squares_target_integrand_term_2 = -np.sqrt(\n self.lmbd\n ) * torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n\n least_squares_target_integrand_term_3 = -torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n 
torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n\n if use_stopping_time:\n M_evals_final = M_evals[:, -1, :, :, :]\n least_squares_target_terminal = torch.einsum(\n \"imkl,ml->imk\",\n M_evals_final,\n self.neural_sde.nabla_g(states[-1, :, :]),\n )\n else:\n M_evals_final = M_evals[:, -1, :, :]\n least_squares_target_terminal = torch.einsum(\n \"ikl,ml->imk\",\n M_evals_final,\n self.neural_sde.nabla_g(states[-1, :, :]),\n )\n\n if use_stopping_time:\n least_squares_target_integrand_term_1_times_dt = (\n least_squares_target_integrand_term_1\n * fractional_timesteps.unsqueeze(0).unsqueeze(3)\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = (\n least_squares_target_integrand_term_2\n * torch.sqrt(fractional_timesteps).unsqueeze(0).unsqueeze(3)\n )\n least_squares_target_integrand_term_3_times_dt = (\n least_squares_target_integrand_term_3\n * fractional_timesteps.unsqueeze(0).unsqueeze(3)\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = (\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2).unsqueeze(0)\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = (\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2)\n )\n least_squares_target_integrand_term_3_times_dt = (\n least_squares_target_integrand_term_3 * dts.unsqueeze(1).unsqueeze(2)\n )\n\n cumsum_least_squares_term_1 = torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=1\n )\n cumsum_least_squares_term_2 = torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=1\n )\n cumsum_least_squares_term_3 = torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=1\n )\n\n least_squares_target = (\n cumsum_least_squares_term_1\n + cumsum_least_squares_term_2\n + cumsum_least_squares_term_3\n + least_squares_target_terminal\n )\n\n if use_stopping_time:\n control_learned = -unsqueezed_stop_indicators * torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -unsqueezed_stop_indicators * torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n least_squares_target,\n )\n else:\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n least_squares_target,\n )\n\n if use_stopping_time:\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (torch.sum(stop_indicators))\n else:\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM_adjoint\":\n nabla_f_evals = self.neural_sde.nabla_f(self.ts, states)\n nabla_b_evals = self.neural_sde.nabla_b(self.ts, states)\n nabla_g_evals = self.neural_sde.nabla_g(states[-1, :, :])\n\n # print(f'nabla_b_evals.shape: {nabla_b_evals.shape}')\n\n a_vectors = torch.zeros_like(states)\n a = nabla_g_evals\n a_vectors[-1, :, :] = a\n\n for k in range(1,len(self.ts)):\n # a += self.dt * (nabla_f_evals[-1-k, :, :] + torch.einsum(\"mkl,ml->mk\", nabla_b_evals[-1-k, :, :, :], a))\n a += self.dt * ((nabla_f_evals[-1-k, :, :] + nabla_f_evals[-k, :, :]) / 2 + torch.einsum(\"mkl,ml->mk\", (nabla_b_evals[-1-k, :, :, :] + nabla_b_evals[-k, :, :, :]) / 2, a))\n a_vectors[-1-k, :, :] = a\n\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n 
)\n control_target = -torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n a_vectors,\n )\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n elif algorithm == \"cross_entropy\":\n learned_controls = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n integrand_term_1 = -(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * controls, dim=2\n )\n integrand_term_2 = (1 / (2 * self.lmbd)) * torch.sum(\n learned_controls**2, dim=2\n )[:-1, :]\n deterministic_integrand = integrand_term_1 + integrand_term_2\n stochastic_integrand = -np.sqrt(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * noises, dim=2\n )\n\n if use_stopping_time:\n deterministic_integrand_times_dt = (\n deterministic_integrand * fractional_timesteps\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n fractional_timesteps\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n deterministic_integrand_times_dt = (\n deterministic_integrand * dts.unsqueeze(1)\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n dts\n ).unsqueeze(1)\n\n deterministic_term = torch.sum(deterministic_integrand_times_dt, dim=0)\n stochastic_term = torch.sum(stochastic_integrand_times_sqrt_dt, dim=0)\n\n objective = torch.mean((deterministic_term + stochastic_term) * weight)\n\n elif (\n algorithm == \"variance\"\n or algorithm == \"log-variance\"\n or algorithm == \"moment\"\n ):\n learned_controls = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n integrand_term_1 = -(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * controls, dim=2\n )\n integrand_term_2 = (1 / (2 * self.lmbd)) * torch.sum(\n learned_controls**2, dim=2\n )[:-1, :]\n integrand_term_3 = (\n -(1 / self.lmbd) * self.neural_sde.f(self.ts[0], states)[:-1, :]\n )\n deterministic_integrand = (\n integrand_term_1 + integrand_term_2 + integrand_term_3\n )\n stochastic_integrand = -np.sqrt(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * noises, dim=2\n )\n if use_stopping_time:\n deterministic_integrand = (\n deterministic_integrand * stop_indicators[:-1, :]\n )\n stochastic_integrand = stochastic_integrand * stop_indicators[:-1, :]\n\n if use_stopping_time:\n deterministic_integrand_times_dt = (\n deterministic_integrand * fractional_timesteps\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n fractional_timesteps\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n deterministic_integrand_times_dt = (\n deterministic_integrand * dts.unsqueeze(1)\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n dts\n ).unsqueeze(1)\n\n deterministic_term = torch.sum(deterministic_integrand_times_dt, dim=0)\n stochastic_term = torch.sum(stochastic_integrand_times_sqrt_dt, dim=0)\n g_term = -(1 / self.lmbd) * self.neural_sde.g(states[-1, :, :])\n if algorithm == \"log-variance\":\n sum_terms = deterministic_term + stochastic_term + g_term\n elif algorithm == \"variance\":\n sum_terms = torch.exp(deterministic_term + stochastic_term + g_term)\n elif algorithm == \"moment\":\n sum_terms = deterministic_term + stochastic_term + g_term + self.y0\n\n if add_weights:\n weight_2 = weight\n else:\n weight_2 = torch.ones_like(weight)\n if algorithm == \"log-variance\" or algorithm == \"variance\":\n objective = (\n len(sum_terms)\n / (len(sum_terms) - 1)\n * (\n torch.mean(sum_terms**2 * 
weight_2)\n - torch.mean(sum_terms * weight_2) ** 2\n )\n )\n elif algorithm == \"moment\":\n objective = torch.mean(sum_terms**2 * weight_2)\n\n if compute_L2_error:\n if algorithm == \"rel_entropy\":\n target_control = optimal_control(self.ts, states, t_is_tensor=True)[\n :-1, :, :\n ].detach()\n else:\n target_control = optimal_control(self.ts, states, t_is_tensor=True)\n if algorithm != \"rel_entropy\":\n learned_control = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n norm_sqd_diff = torch.sum(\n (target_control - learned_control) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n / (target_control.shape[0] * target_control.shape[1])\n )\n else:\n norm_sqd_diff = None\n\n if compute_control_objective:\n ctrl_loss_mean, ctrl_loss_std_err, trajectory = self.control_objective(\n batch_size, total_n_samples=total_n_samples\n )\n else:\n ctrl_loss_mean = None\n ctrl_loss_std_err = None\n trajectory = None\n\n if verbose:\n # To print amount of memory used in GPU\n nvidia_smi.nvmlInit()\n handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)\n # card id 0 hardcoded here, there is also a call to get all available card ids, so we could iterate\n info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)\n print(\"Total memory:\", info.total / 1048576, \"MiB\")\n print(\"Free memory:\", info.free / 1048576, \"MiB\")\n print(\"Used memory:\", info.used / 1048576, \"MiB\")\n nvidia_smi.nvmlShutdown()\n\n return (\n objective,\n norm_sqd_diff,\n ctrl_loss_mean,\n ctrl_loss_std_err,\n trajectory,\n torch.mean(weight),\n torch.std(weight),\n stop_indicators,\n )" }, { "identifier": "define_variables", "path": "SOC_matching/experiment_settings/settings.py", "snippet": "def define_variables(cfg, ts):\n if (\n cfg.method.setting == \"OU_quadratic_easy\"\n or cfg.method.setting == \"OU_quadratic_hard\"\n ):\n if cfg.method.d == 2:\n x0 = torch.tensor([0.4, 0.6]).to(cfg.method.device)\n else:\n x0 = 0.5 * torch.randn(cfg.method.d).to(cfg.method.device)\n print(f\"x0: {x0}\")\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n if cfg.method.setting == \"OU_quadratic_hard\":\n A = 1.0 * torch.eye(cfg.method.d).to(cfg.method.device)\n P = 1.0 * torch.eye(cfg.method.d).to(cfg.method.device)\n Q = 0.5 * torch.eye(cfg.method.d).to(cfg.method.device)\n elif cfg.method.setting == \"OU_quadratic_easy\":\n A = 0.2 * torch.eye(cfg.method.d).to(cfg.method.device)\n P = 0.2 * torch.eye(cfg.method.d).to(cfg.method.device)\n Q = 0.1 * torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, A=A, P=P, Q=Q)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, A=A, P=P, Q=Q\n )\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"OU_linear\":\n x0 = torch.zeros(cfg.method.d).to(cfg.method.device)\n nu = 0.1\n xi = nu * torch.randn(cfg.method.d, cfg.method.d).to(cfg.method.device)\n omega = torch.ones(cfg.method.d).to(cfg.method.device)\n A = -torch.eye(cfg.method.d).to(cfg.method.device) + xi\n sigma = torch.eye(cfg.method.d).to(cfg.method.device) + xi\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, omega=omega, A=A)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, omega=omega, A=A\n )\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"double_well\":\n print(f\"double_well\")\n x0 = 
torch.zeros(cfg.method.d).to(cfg.method.device)\n\n kappa_i = 5\n nu_i = 3\n kappa = torch.ones(cfg.method.d).to(cfg.method.device)\n nu = torch.ones(cfg.method.d).to(cfg.method.device)\n kappa[0] = kappa_i\n kappa[1] = kappa_i\n kappa[2] = kappa_i\n nu[0] = nu_i\n nu[1] = nu_i\n nu[2] = nu_i\n\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, kappa=kappa, nu=nu)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, kappa=kappa, nu=nu\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"molecular_dynamics\":\n print(f\"molecular_dynamics\")\n x0 = -torch.ones(cfg.method.d).to(cfg.method.device)\n\n kappa = torch.ones(cfg.method.d).to(cfg.method.device)\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(\n cfg,\n ts,\n x0,\n sigma=sigma,\n kappa=kappa,\n )\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg,\n ts,\n x0,\n u_warm_start,\n sigma=sigma,\n kappa=kappa,\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"multiagent_8\":\n print(f\"multiagent_8\")\n x0 = torch.tensor(\n [\n -4.0,\n 4.5,\n -7.0,\n 4.5,\n -4.0,\n 1.5,\n -7.0,\n 1.5,\n -4.0,\n -1.5,\n -7.0,\n -1.5,\n -4.0,\n -4.5,\n -7.0,\n -4.5,\n ]\n ).to(cfg.method.device)\n\n g_center = torch.tensor(\n [\n 4.0,\n 4.5,\n 7.0,\n 4.5,\n 4.0,\n 1.5,\n 7.0,\n 1.5,\n 4.0,\n -1.5,\n 7.0,\n -1.5,\n 4.0,\n -4.5,\n 7.0,\n -4.5,\n ]\n ).to(cfg.method.device)\n g_coeff = 2.00\n f_coeff = 0.05\n\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(\n cfg,\n ts,\n x0,\n sigma=sigma,\n g_center=g_center,\n g_coeff=g_coeff,\n f_coeff=f_coeff,\n )\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg,\n ts,\n x0,\n u_warm_start,\n sigma=sigma,\n g_center=g_center,\n g_coeff=g_coeff,\n f_coeff=f_coeff,\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start" } ]
import torch
import sys
import logging
import os
import time
import json
import hydra
import traceback
from tqdm.notebook import tqdm
from omegaconf import DictConfig

from SOC_matching.utils import (
    get_folder_name,
    get_file_name,
    control_objective,
    save_results,
    compute_EMA,
    normalization_constant,
)
from SOC_matching.method import (
    SOC_Solver,
)
from SOC_matching.experiment_settings.settings import define_variables
11,688
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory.

log = logging.getLogger(__name__)


@hydra.main(version_base=None, config_path="configs", config_name="soc")
def main(cfg: DictConfig):
    logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO"))
    print(cfg)

    print("Found {} CUDA devices.".format(torch.cuda.device_count()))
    for i in range(torch.cuda.device_count()):
        props = torch.cuda.get_device_properties(i)
        print(
            "{} \t Memory: {:.2f}GB".format(
                props.name, props.total_memory / (1024**3)
            )
        )
    keys = [
        "SLURM_NODELIST",
        "SLURM_JOB_ID",
        "SLURM_NTASKS",
        "SLURM_JOB_NAME",
        "SLURM_PROCID",
        "SLURM_LOCALID",
        "SLURM_NODEID",
    ]
    log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4))

    cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]])
    with open("cmd.sh", "w") as fout:
        print("#!/bin/bash\n", file=fout)
        print(cmd_str, file=fout)

    log.info(f"CWD: {os.getcwd()}")

    if cfg.method.use_gpu:
        cfg.method.device = "cuda:" + str(cfg.method.device_number)
    else:
        cfg.method.device = "cpu"

    torch.manual_seed(cfg.method.seed)

    algorithm = cfg.method.algorithm
    folder_name = (
        cfg.method.algorithm
        + "_" + cfg.method.setting
        + "_" + str(cfg.method.lmbd)
        + "_" + str(cfg.method.T)
        + "_" + str(cfg.method.num_steps)
        + "_" + str(cfg.method.use_warm_start)
        + "_" + str(cfg.method.seed)
        + "_" + str(cfg.optim.batch_size)
        + "_" + str(cfg.optim.M_lr)
        + "_" + str(cfg.optim.nabla_V_lr)
    )

    ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device)

    folder_name = get_folder_name(cfg)
    file_name = get_file_name(folder_name, num_iterations=cfg.method.num_iterations)

    EMA_loss = 0
    EMA_norm_sqd_diff = 0
    EMA_coeff = 0.01
    EMA_weight_mean_coeff = 0.002

    x0, sigma, optimal_sde, neural_sde, u_warm_start = define_variables(cfg, ts)

    if optimal_sde is not None:
        ground_truth_control = optimal_sde.u
    else:
        ground_truth_control = None

    state0 = x0.repeat(cfg.optim.batch_size, 1)

    ########### Compute normalization constant and control L2 error for initial control ############
    print(
        f"Estimating normalization constant and control L2 error for initial control..."
    )
    (
        normalization_const,
        normalization_const_std_error,
        norm_sqd_diff_mean,
    ) = normalization_constant(
        neural_sde,
        state0,
        ts,
        cfg,
        n_batches_normalization=512,
        ground_truth_control=ground_truth_control,
    )
    print(
        f"Normalization_constant (mean and std. error): {normalization_const:5.8E} {normalization_const_std_error:5.8E}"
    )
    if ground_truth_control is not None:
        print(
            f"Control L2 error for initial control: {norm_sqd_diff_mean / normalization_const}"
        )

    ########### Compute control loss for optimal control ############
    if optimal_sde is not None:
        (
            optimal_control_objective_mean,
            optimal_control_objective_std_error,
) = control_objective(
2
2023-12-04 20:26:18+00:00
16k
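For reference, the control L2 error assembled in the SOC_Solver loss code quoted above compares the learned control u = -sigma^T nabla_V against the ground-truth control under per-trajectory importance weights. Below is a minimal, self-contained sketch of that computation; the tensor shapes and values are illustrative assumptions, not taken from the repository.

import torch

# Assumed shapes: T time steps, B sampled trajectories, d control dimension.
T, B, d = 10, 4, 3
sigma = torch.eye(d)                    # diffusion matrix
nabla_V = torch.randn(T, B, d)          # learned gradient of the value function
target_control = torch.randn(T, B, d)   # ground-truth optimal control on the sampled states
weight = torch.rand(B)                  # importance weight per trajectory

# Learned control u = -sigma^T nabla_V at every (time, trajectory) pair
learned_control = -torch.einsum("ij,abj->abi", sigma.transpose(0, 1), nabla_V)

# Importance-weighted squared error, averaged over time steps and trajectories
norm_sqd_diff = torch.sum(
    (target_control - learned_control) ** 2
    * weight.unsqueeze(0).unsqueeze(2)
    / (target_control.shape[0] * target_control.shape[1])
)
print(norm_sqd_diff.item())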
yiwenlu66/learning-qp
experiments/tank/visualize_feasible_sets.py
[ { "identifier": "sys_param", "path": "src/envs/env_creators.py", "snippet": "def tank_initial_generator(size, device, rng):\ndef tank_ref_generator(size, device, rng):\ndef tank_randomizer(size, device, rng):\n B = torch.tensor(sys_param[\"tank\"][\"B\"], device=device, dtype=torch.float).unsqueeze(0)" }, { "identifier": "get_mpc_baseline_parameters", "path": "src/envs/mpc_baseline_parameters.py", "snippet": "def get_mpc_baseline_parameters(env_name, N, noise_std=0.):\n mpc_parameters = {\n \"n_mpc\": sys_param[env_name][\"n\"],\n \"m_mpc\": sys_param[env_name][\"m\"],\n \"N\": N,\n **sys_param[env_name],\n }\n if env_name == \"tank\":\n # Compute state and ref from obs: the first n entries of obs is state, and the latter n entries are ref\n mpc_parameters[\"obs_to_state_and_ref\"] = lambda obs: (obs[:, :mpc_parameters[\"n_mpc\"]], obs[:, mpc_parameters[\"n_mpc\"]:])\n A_nom = sys_param[env_name][\"A\"]\n A_max = np.copy(A_nom)\n A_max[tuple(zip(*[(0, 0), (0, 2), (1, 1), (1, 3), (2, 2), (3, 3)]))] += 0.002\n B_nom = sys_param[env_name][\"B\"]\n B_max = np.copy(B_nom)\n B_max *= 1.02\n mpc_parameters[\"A_scenarios\"] = [A_nom, A_max]\n mpc_parameters[\"B_scenarios\"] = [B_nom, B_max]\n n_mpc = mpc_parameters[\"n_mpc\"]\n mpc_parameters[\"w_scenarios\"] = [\n np.zeros((n_mpc, 1)),\n 3 * noise_std * np.ones((n_mpc, 1)),\n -3 * noise_std * np.ones((n_mpc, 1)),\n ]\n # mpc_parameters[\"max_disturbance_per_dim\"] = 0.5 * (3 * noise_std + 20 * 0.002 * 2 + 8 * 0.02 * 2)\n if env_name == \"cartpole\":\n # Compute A, B matrices for linearized system\n m_pole = mpc_parameters[\"m_pole_nom\"]\n m_cart = mpc_parameters[\"m_cart_nom\"]\n l = mpc_parameters[\"l_nom\"]\n g = 9.8\n\n # Continuous time A, B matrices\n A_ct = np.array([\n [0, 1, 0, 0],\n [0, 0, -g * m_pole / m_cart, 0],\n [0, 0, 0, 1],\n [0, 0, (m_cart + m_pole) * g / (l * m_cart) , 0],\n ])\n B_ct = np.array([\n [0],\n [1 / m_cart],\n [0],\n [-1 / (l * m_cart)],\n ])\n\n # Discretization\n dt = sys_param[env_name][\"dt\"]\n A = np.eye(4) + dt * A_ct\n B = dt * B_ct\n\n mpc_parameters[\"A\"] = A\n mpc_parameters[\"B\"] = B\n\n # Compute state and ref from obs: obs is in format (x, x_ref, x_dot, sin_theta, cos_theta, theta_dot)\n def obs_to_state_and_ref(obs):\n x, x_dot, theta, theta_dot, x_ref = obs[:, 0], obs[:, 1], obs[:, 2], obs[:, 3], obs[:, 4]\n state = torch.stack([x, x_dot, theta, theta_dot], dim=1)\n zeros = torch.zeros_like(x_ref)\n ref = torch.stack([x_ref, zeros, zeros, zeros], dim=1)\n return state, ref\n mpc_parameters[\"obs_to_state_and_ref\"] = obs_to_state_and_ref\n\n return mpc_parameters" }, { "identifier": "QPUnrolledNetwork", "path": "src/modules/qp_unrolled_network.py", "snippet": "class QPUnrolledNetwork(nn.Module):\n \"\"\"\n Learn a QP problem from the input using a MLP, then solve the QP using fixed number of unrolled PDHG iterations.\n\n Form of QP:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n where x in R^n, b in R^m.\n \"\"\"\n def __init__(\n self, device, input_size, n_qp, m_qp, qp_iter, mlp_builder,\n shared_PH=False,\n affine_qb=False,\n strict_affine_layer=False,\n obs_has_half_ref=False,\n symmetric=False,\n no_b=False,\n use_warm_starter=False,\n train_warm_starter=False,\n ws_loss_coef=1.,\n ws_update_rate=0.01,\n ws_loss_shaper=lambda x: x ** (1 / 2),\n mpc_baseline=None,\n use_osqp_for_mpc=False,\n imitate_mpc=False,\n use_residual_loss=False,\n force_feasible=False,\n feasible_lambda=10,\n is_test=False,\n ):\n \"\"\"mlp_builder is a function mapping (input_size, output_size) to a 
nn.Sequential object.\n\n If shared_PH == True, P and H are parameters indepedent of input, and q and b are functions of input;\n Otherwise, (P, H, q, b) are all functions of input.\n\n If affine_qb == True, then q and b are restricted to be affine functions of input.\n\n If strict_affine_layer == True (only effective when affine_qb=True), then:\n 1. q is linear w.r.t. (x0, xref) (no bias)\n 2. b is affine w.r.t. x0 (no dependence on xref)\n\n If obs_has_half_ref == True, the policy knows that the observation is in the form (x0, xref), with each taking up half of the dimension of the observation.\n\n If symmetric == True (only effective when affine_qb=True), then:\n 1. The bias terms are disabled in the modeling of q and b, i.e., q = Wq * x, b = Wb * x.\n 2. The constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0.\n\n If no_b == True in addition to symmetric == True, then b is skipped altogether, i.e., the constraint is assumed to be -1 <= Hx <= 1.\n\n If mpc_baseline != None and imitate_mpc == False, then the forward function directly returns the solution of the MPC problem, instead of solving the learned QP problem. Can be used for benchmarking MPC.\n\n If mpc_baseline != None and imitate_mpc == True, then the forward function returns the solution of the learned QP problem, but a loss term is computed using the MPC problem. Can be used for supervised imitation learning.\n\n If force_feasible == True, solve the following problem instead of the original QP problem:\n minimize_{x,y} (1/2)x'Px + q'x + lambda * y^2\n s.t. Hx + b + y * 1 >= 0, y >= 0,\n where x in R^n, y in R.\n In this case, the solution returned will be of dimension (n + 1).\n \"\"\"\n\n super().__init__()\n\n self.shared_PH = shared_PH\n self.affine_qb = affine_qb\n self.strict_affine_layer = strict_affine_layer\n self.obs_has_half_ref = obs_has_half_ref\n\n self.device = device\n self.input_size = input_size\n\n # QP dimensions: there are the number of variables and constraints WITHOUT considering the slack variable\n self.n_qp = n_qp\n self.m_qp = m_qp\n\n self.qp_iter = qp_iter\n\n self.symmetric = symmetric\n self.no_b = no_b\n\n self.n_P_param = n_qp * (n_qp + 1) // 2\n self.n_q_param = n_qp\n self.n_H_param = m_qp * n_qp\n self.n_b_param = m_qp if not self.no_b else 0\n\n self.n_mlp_output = 0\n if not self.shared_PH:\n self.n_mlp_output += (self.n_P_param + self.n_H_param)\n self.P_params = None\n self.H_params = None\n else:\n self.P_params = nn.Parameter(torch.randn((self.n_P_param,), device=device))\n self.H_params = nn.Parameter(torch.randn((self.n_H_param,), device=device))\n\n if not self.affine_qb:\n self.n_mlp_output += (self.n_q_param + self.n_b_param)\n self.qb_affine_layer = None\n else:\n if not self.strict_affine_layer:\n self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric)\n else:\n self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref)\n\n if self.n_mlp_output > 0:\n self.mlp = mlp_builder(input_size, self.n_mlp_output)\n else:\n self.mlp = None\n\n # TODO: add preconditioner\n self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None\n self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None\n self.train_warm_starter = train_warm_starter\n self.ws_loss_coef = ws_loss_coef\n self.ws_update_rate = ws_update_rate\n self.ws_loss_shaper = ws_loss_shaper\n\n # P, H 
are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True)\n self.fixed_PH = is_test and shared_PH\n\n # Includes losses generated by the model itself (indepedent of interaction with env), e.g., warm starting & preconditioning\n self.autonomous_losses = {}\n\n self.mpc_baseline = mpc_baseline\n self.use_osqp_for_mpc = use_osqp_for_mpc\n\n self.imitate_mpc = imitate_mpc\n\n # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem\n self.use_residual_loss = use_residual_loss\n\n # Whether to force the problem to be feasible\n self.force_feasible = force_feasible\n self.feasible_lambda = feasible_lambda\n\n self.solver = None\n\n self.info = {}\n\n # Reserved for storing the controllers for each simulation instance when robust MPC is enabled\n self.robust_controllers = []\n\n # Store info returned by env\n self.env_info = {}\n\n # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization\n self.is_active = None\n\n\n def initialize_solver(self):\n # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable)\n n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp\n m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp\n\n # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver\n # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead\n if not self.fixed_PH:\n self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible)\n else:\n # Should be called after loading state dict\n Pinv, H = self.get_PH()\n self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible)\n\n def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs):\n qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H])\n X0 = self.warm_starter(qd, bd, Pinvd, Hd)\n gt = solver_Xs[:, -1, :].detach()\n return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean())\n\n def parallel_controller_creation(self, controller_creator, xref_np, bs):\n \"\"\"\n Create robust MPC controlller in parallel\n \"\"\"\n # Helper function for parallel execution\n def task_creator(index):\n return controller_creator(self.mpc_baseline, xref_np[index, :])\n\n with ThreadPoolExecutor() as executor:\n # Executing the tasks in parallel\n results = executor.map(task_creator, range(bs))\n\n # Collecting the results\n self.robust_controllers.extend(results)\n\n def run_mpc_baseline(self, x, use_osqp_oracle=False):\n robust_method = self.mpc_baseline.get(\"robust_method\", None)\n x0, xref = self.mpc_baseline[\"obs_to_state_and_ref\"](x)\n bs = x.shape[0]\n\n # Conversions between torch and np\n t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float)\n f = lambda t: t.detach().cpu().numpy()\n f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy())\n\n if 
robust_method is None:\n # Run vanilla MPC without robustness\n eps = 1e-3\n n, m, P, q, H, b = mpc2qp(\n self.mpc_baseline[\"n_mpc\"],\n self.mpc_baseline[\"m_mpc\"],\n self.mpc_baseline[\"N\"],\n t(self.mpc_baseline[\"A\"]),\n t(self.mpc_baseline[\"B\"]),\n t(self.mpc_baseline[\"Q\"]),\n t(self.mpc_baseline[\"R\"]),\n self.mpc_baseline[\"x_min\"] + eps,\n self.mpc_baseline[\"x_max\"] - eps,\n self.mpc_baseline[\"u_min\"],\n self.mpc_baseline[\"u_max\"],\n x0,\n xref,\n normalize=self.mpc_baseline.get(\"normalize\", False),\n Qf=self.mpc_baseline.get(\"terminal_coef\", 0.) * t(np.eye(self.mpc_baseline[\"n_mpc\"])) if self.mpc_baseline.get(\"Qf\", None) is None else t(self.mpc_baseline[\"Qf\"]),\n )\n if not use_osqp_oracle:\n solver = QPSolver(x.device, n, m, P=P, H=H)\n Xs, primal_sols = solver(q, b, iters=100)\n sol = primal_sols[:, -1, :]\n else:\n osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True)\n if q.shape[0] > 1:\n sol_np, iter_counts = np_batch_op(osqp_oracle_with_iter_count, f(q), f(b), f_sparse(P), f_sparse(H))\n sol = t(sol_np)\n else:\n sol_np, iter_count = osqp_oracle_with_iter_count(f(q[0, :]), f(b[0, :]), f_sparse(P), f_sparse(H))\n sol = t(sol_np).unsqueeze(0)\n iter_counts = np.array([iter_count])\n # Save OSQP iteration counts into the info dict\n if \"osqp_iter_counts\" not in self.info:\n self.info[\"osqp_iter_counts\"] = iter_counts\n else:\n self.info[\"osqp_iter_counts\"] = np.concatenate([self.info[\"osqp_iter_counts\"], iter_counts])\n return sol, (P.unsqueeze(0), q, H.unsqueeze(0), b)\n\n elif robust_method in [\"scenario\", \"tube\"]:\n # Set up scenario or tube MPC\n if not self.robust_controllers:\n # Create a controller for each simulation instance, according to the current reference (note: this assumes that the mapping from instance index to reference is constant)\n controller_creator = {\n \"scenario\": scenario_robust_mpc,\n \"tube\": tube_robust_mpc,\n }[robust_method]\n xref_np = f(xref)\n self.parallel_controller_creation(controller_creator, xref_np, bs)\n self.is_active = np.ones((bs,), dtype=bool)\n\n # Get solutions according to current state\n x0_np = f(x0)\n already_on_stats = f(self.env_info.get(\"already_on_stats\", torch.zeros((bs,), dtype=bool))).astype(bool)\n self.is_active = np.logical_not(already_on_stats) & self.is_active # Skip computation for instances already done\n get_solution = lambda i: self.robust_controllers[i](x0_np[i, :], is_active=self.is_active[i])\n sol_np, running_time = np_batch_op(get_solution, np.arange(bs))\n sol = t(sol_np)\n\n # Save running time to info dict\n non_zero_mask = running_time != 0. 
# Filter out instances that are already done\n running_time_eff = running_time[non_zero_mask]\n if \"running_time\" not in self.info:\n self.info[\"running_time\"] = running_time_eff\n else:\n self.info[\"running_time\"] = np.concatenate([self.info[\"running_time\"], running_time_eff])\n\n return sol, None\n\n\n def get_PH(self, mlp_out=None):\n \"\"\"\n Compute P, H matrices from the parameters.\n Notice: returns (Pinv, H) instead of (P, H)\n \"\"\"\n # Decode MLP output\n end = 0\n if not self.shared_PH:\n start = end\n end = start + self.n_P_param\n P_params = mlp_out[:, start:end]\n start = end\n end = start + self.n_H_param\n H_params = mlp_out[:, start:end]\n else:\n P_params = self.P_params.unsqueeze(0)\n H_params = self.H_params.unsqueeze(0)\n\n # Reshape P, H vectors into matrices\n Pinv = make_psd(P_params, min_eig=1e-2)\n H = H_params.view(-1, self.m_qp, self.n_qp)\n\n # If the problem is forced to be feasible, compute the parameters (\\tilde{P}, \\tilde{H}) of the augmented problem\n # \\tilde{P} = [P, 0; 0, lambda]\n if self.force_feasible:\n zeros_n = torch.zeros((1, self.n_qp, 1), device=self.device)\n I = torch.eye(1, device=self.device).unsqueeze(0)\n tilde_P_inv = torch.cat([\n torch.cat([Pinv, zeros_n], dim=2),\n torch.cat([zeros_n.transpose(1, 2), 1 / self.feasible_lambda * I], dim=2)\n ], dim=1)\n # \\tilde{H} = [H, I; 0, I]\n ones_m = torch.ones((1, self.m_qp, 1), device=self.device)\n tilde_H = torch.cat([\n torch.cat([H, ones_m], dim=2),\n torch.cat([zeros_n.transpose(1, 2), I], dim=2)\n ], dim=1)\n Pinv, H = tilde_P_inv, tilde_H\n return Pinv, H\n\n def get_qb(self, x, mlp_out=None):\n \"\"\"\n Compute q, b vectors from the parameters.\n \"\"\"\n bs = x.shape[0]\n end = self.n_P_param + self.n_H_param if not self.shared_PH else 0\n if not self.affine_qb:\n start = end\n end = start + self.n_q_param\n q = mlp_out[:, start:end]\n start = end\n end = start + self.n_b_param\n b = mlp_out[:, start:end]\n else:\n qb = self.qb_affine_layer(x)\n q = qb[:, :self.n_q_param]\n b = qb[:, self.n_q_param:]\n if self.no_b:\n b = torch.zeros((bs, self.m_qp), device=self.device)\n\n # If the problem is forced to be feasible, compute the parameters (\\tilde{q}, \\tilde{b}) of the augmented problem\n if self.force_feasible:\n zeros_1 = torch.zeros((bs, 1), device=self.device)\n # \\tilde{q} = [q; 0]\n tilde_q = torch.cat([q, zeros_1], dim=1)\n # \\tilde{b} = [b; 0]\n tilde_b = torch.cat([b, zeros_1], dim=1)\n q, b = tilde_q, tilde_b\n\n return q, b\n\n def forward(self, x, return_problem_params=False, info=None):\n if info is not None:\n self.env_info = info\n if self.mpc_baseline is not None:\n mpc_sol, mpc_problem_params = self.run_mpc_baseline(x, use_osqp_oracle=self.use_osqp_for_mpc)\n\n if (self.mpc_baseline is not None) and (not self.imitate_mpc):\n # MPC solution is directly used as the final solution\n sol, problem_params = mpc_sol, mpc_problem_params\n else:\n # Check whether solver has been initialized\n if self.solver is None:\n self.initialize_solver()\n\n bs = x.shape[0]\n\n # Run MLP forward pass, if necessary\n if self.mlp is not None:\n mlp_out = self.mlp(x)\n else:\n mlp_out = None\n\n # Compute P, H, if they are not fixed\n if not self.fixed_PH:\n Pinv, H = self.get_PH(mlp_out)\n else:\n Pinv, H = None, None\n\n # Compute q, b\n q, b = self.get_qb(x, mlp_out)\n\n # Update parameters of warm starter with a delay to stabilize training\n if self.train_warm_starter:\n 
self.warm_starter_delayed.load_state_dict(interpolate_state_dicts(self.warm_starter_delayed.state_dict(), self.warm_starter.state_dict(), self.ws_update_rate))\n\n # Run solver forward\n if self.use_residual_loss:\n Xs, primal_sols, residuals = self.solver(q, b, Pinv=Pinv, H=H, iters=self.qp_iter, return_residuals=True)\n primal_residual, dual_residual = residuals\n residual_loss = ((primal_residual ** 2).sum(dim=-1) + (dual_residual ** 2).sum(dim=-1)).mean()\n self.autonomous_losses[\"residual\"] = 1e-3 * residual_loss\n else:\n Xs, primal_sols = self.solver(q, b, Pinv=Pinv, H=H, iters=self.qp_iter)\n sol = primal_sols[:, -1, :]\n\n # Compute warm starter loss\n if self.train_warm_starter:\n self.autonomous_losses[\"warm_starter\"] = self.compute_warm_starter_loss(q, b, Pinv, H, Xs)\n\n # Compute imitation loss\n if self.imitate_mpc:\n # Use min(n of learned qp, n of mpc) as the common dimension of solution\n sol_dim = min(self.n_qp, mpc_sol.shape[-1])\n self.autonomous_losses[\"imitation_only\"] = ((sol[:, :sol_dim] - mpc_sol[:, :sol_dim]) ** 2).sum(dim=-1).mean()\n\n if return_problem_params:\n problem_params = (torch.linalg.inv(Pinv), q, H, b)\n\n if not return_problem_params:\n # Only return the solution\n return sol\n else:\n # Return the solution as well as (P, q, H, b)\n return sol, problem_params" }, { "identifier": "bmv", "path": "src/utils/torch_utils.py", "snippet": "def bmv(A, b):\n \"\"\"Compute matrix multiply vector in batch mode.\"\"\"\n bs = b.shape[0]\n if A.shape[0] == 1:\n # The same A for different b's; use matrix multiplication instead of broadcasting\n return (A.squeeze(0) @ b.t()).t()\n else:\n return (A @ b.unsqueeze(-1)).squeeze(-1)" }, { "identifier": "plot_multiple_2d_polytopes_with_contour", "path": "src/utils/visualization.py", "snippet": "def plot_multiple_2d_polytopes_with_contour(polytope_contour_params):\n \"\"\"\n Plot multiple 2D polytopes each defined by Ax <= b and overlay the contour of a quadratic function.\n \n Parameters:\n - polytope_contour_params (list of dict): List of dictionaries containing A, b, optimal_solution, P, q, and label.\n \n Returns:\n - fig (matplotlib.figure.Figure): Figure object.\n - ax (matplotlib.axes._subplots.AxesSubplot): Axis object.\n \"\"\"\n \n fig, ax = plt.subplots()\n \n # Determine global x and y limits\n all_vertices = []\n for params in polytope_contour_params:\n interior_point = find_interior_point(params['A'], params['b'])\n if interior_point is not None:\n vertices = HalfspaceIntersection(np.hstack([params['A'], -params['b'][:, np.newaxis]]), interior_point).intersections\n all_vertices.append(vertices)\n all_vertices = np.vstack(all_vertices)\n \n margin = 0.5 # Additional margin around the polytopes\n x_range = np.max(all_vertices[:, 0]) - np.min(all_vertices[:, 0])\n y_range = np.max(all_vertices[:, 1]) - np.min(all_vertices[:, 1])\n max_range = max(x_range, y_range) + 2 * margin\n x_margin = (max_range - x_range) / 2\n y_margin = (max_range - y_range) / 2\n x_min, x_max = np.min(all_vertices[:, 0]) - x_margin, np.max(all_vertices[:, 0]) + x_margin\n y_min, y_max = np.min(all_vertices[:, 1]) - y_margin, np.max(all_vertices[:, 1]) + y_margin\n x_grid, y_grid = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100))\n \n custom_legend_handles = []\n \n for params in polytope_contour_params:\n A, b, P, q, color, label = params['A'], params['b'], params['P'], params['q'], params['color'], params['label']\n optimal_solution = params.get(\"optimal_solution\", None)\n \n # Find an interior 
point\n interior_point = find_interior_point(A, b)\n if interior_point is None:\n continue # Skip this polytope if LP is infeasible\n \n # Plot polytope\n halfspace_intersection = HalfspaceIntersection(np.hstack([A, -b[:, np.newaxis]]), interior_point)\n vertices = halfspace_intersection.intersections\n hull = ConvexHull(vertices)\n ordered_vertices = vertices[hull.vertices]\n closed_loop = np.vstack([ordered_vertices, ordered_vertices[0]])\n \n ax.fill(closed_loop[:, 0], closed_loop[:, 1], alpha=0.3, color=color, label=f\"{label} (Polytope)\")\n ax.plot(closed_loop[:, 0], closed_loop[:, 1], color=color)\n \n # Mark the optimal solution\n if optimal_solution is not None:\n ax.plot(optimal_solution[0], optimal_solution[1], 'o', color=color)\n \n # Evaluate quadratic function\n Z = np.zeros_like(x_grid)\n for i in range(x_grid.shape[0]):\n for j in range(x_grid.shape[1]):\n x_vec = np.array([x_grid[i, j], y_grid[i, j]])\n Z[i, j] = 0.5 * x_vec.T @ P @ x_vec + q.T @ x_vec\n \n # Plot contour\n contour = ax.contour(x_grid, y_grid, Z, levels=5, colors=color) # Reduced number of levels for sparser contour\n\n # Create a custom legend handle\n custom_legend_handles.append(Line2D([0], [0], color=color, lw=4, label=label))\n\n # Adjust plot settings\n ax.set_aspect('equal', adjustable='box')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n \n # Add custom legend\n if custom_legend_handles:\n # Move legend outside the plot\n ax.legend(handles=custom_legend_handles, loc='upper left', bbox_to_anchor=(1, 1))\n # Adjust layout to prevent clipping\n plt.tight_layout(rect=[0, 0, 0.85, 1])\n \n return fig, ax" }, { "identifier": "high_dim_to_2D_sampling", "path": "src/utils/geometry.py", "snippet": "def high_dim_to_2D_sampling(A, b, grid_size=50, x_range=(-1, 1)):\n \"\"\"\n Converts a high-dimensional polytope {x | Ax <= b} to its 2D projection {x | A_proj x <= b_proj}\n using a sampling-based approximation method.\n \n Parameters:\n - A (numpy.ndarray): The coefficient matrix for the high-dimensional inequalities.\n - b (numpy.ndarray): The constant terms for the high-dimensional inequalities.\n - grid_size (int): The number of grid points along each dimension in the sampling grid.\n - x_range (tuple): The range (min, max) for both x1 and x2 in the 2D plane.\n \n Returns:\n - A_2D (numpy.ndarray): The coefficient matrix for the 2D inequalities.\n - b_2D (numpy.ndarray): The constant terms for the 2D inequalities.\n \"\"\"\n \n def sample_based_projection_LP(A, b, x1_range, x2_range, grid_size):\n x1_min, x1_max = x1_range\n x2_min, x2_max = x2_range\n x1_vals = np.linspace(x1_min, x1_max, grid_size)\n x2_vals = np.linspace(x2_min, x2_max, grid_size)\n grid_points = np.array([[x1, x2] for x1 in x1_vals for x2 in x2_vals])\n feasible_points = []\n for point in grid_points:\n x_dim = np.zeros(A.shape[1])\n x_dim[:2] = point\n c = np.zeros(A.shape[1] - 2)\n A_ub = A[:, 2:]\n b_ub = b - np.dot(A[:, :2], point)\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=(None, None), method='highs')\n if res.success:\n feasible_points.append(point)\n feasible_points = np.array(feasible_points)\n if feasible_points.shape[0] < 3:\n return \"Insufficient feasible points for a 2D polytope.\"\n hull = ConvexHull(feasible_points)\n vertices = hull.points[hull.vertices]\n return vertices\n \n # Step 1: Sample points and find the approximated vertices in 2D\n vertices_approx = sample_based_projection_LP(A, b, x_range, x_range, grid_size)\n \n # Step 2: Find supporting hyperplanes in 2D\n A_2D, b_2D = 
find_supporting_hyperplanes(vertices_approx)\n \n return A_2D, b_2D" }, { "identifier": "partial_minimization_2D", "path": "src/utils/geometry.py", "snippet": "def partial_minimization_2D(P, q):\n \"\"\"\n Performs partial minimization over dimensions starting from 3 to obtain a 2D quadratic function.\n \n Parameters:\n - P (numpy.ndarray): The coefficient matrix for the high-dimensional quadratic function.\n - q (numpy.ndarray): The coefficient vector for the high-dimensional quadratic function.\n \n Returns:\n - P_2D (numpy.ndarray): The 2x2 coefficient matrix for the resulting 2D quadratic function.\n - q_2D (numpy.ndarray): The 2D coefficient vector for the resulting 2D quadratic function.\n - c (float): The constant bias term for the resulting 2D quadratic function.\n \"\"\"\n # Decompose P into P11, P12, P21, P22\n P11 = P[:2, :2]\n P12 = P[:2, 2:]\n P21 = P[2:, :2]\n P22 = P[2:, 2:]\n \n # Decompose q into q1 and q2\n q1 = q[:2]\n q2 = q[2:]\n\n # Compute the 2D quadratic function parameters\n P_2D = P11 - P12 @ np.linalg.inv(P22) @ P21\n q_2D = q1 - P12 @ np.linalg.inv(P22) @ q2\n c = -0.5 * q2 @ np.linalg.inv(P22) @ q2\n\n return P_2D, q_2D, c" } ]
import numpy as np
import sys
import os
import torch
from src.envs.env_creators import sys_param, env_creators
from src.envs.mpc_baseline_parameters import get_mpc_baseline_parameters
from src.modules.qp_unrolled_network import QPUnrolledNetwork
from matplotlib import pyplot as plt
from icecream import ic
from src.utils.torch_utils import bmv
from src.utils.visualization import plot_multiple_2d_polytopes_with_contour
from src.utils.geometry import high_dim_to_2D_sampling, partial_minimization_2D
10,828
# %% Specify test case
# Case where MPC is better
x0 = np.array([10., 10., 10., 10.])
x_ref = np.array([19, 19, 2.4, 2.4])

# # Case where MPC fails
# x0 = np.array([ 5.4963946, 10.947876, 1.034516, 18.08066 ])
# x_ref = np.array([7.522859, 8.169776, 1.1107684, 1. ])

# Controlling process noise and parametric uncertainty
noise_level = 0
parametric_uncertainty = False
parameter_randomization_seed = 2

# %% Set up test bench
file_path = os.path.dirname(__file__)
sys.path.append(os.path.join(file_path, "../.."))

# Utilities
def make_obs(x, x_ref, running_mean, running_std, normalize):
    raw_obs = torch.tensor(np.concatenate([x, x_ref]), device=device, dtype=torch.float)
    if not normalize:
        return raw_obs.unsqueeze(0)
    else:
        return ((raw_obs - running_mean) / running_std).unsqueeze(0)

def get_state_dict(checkpoint_path):
    checkpoint = torch.load(checkpoint_path)
    model = checkpoint["model"]
    prefix = "a2c_network.policy_net."
    policy_net_state_dict = {k.lstrip(prefix): v for (k, v) in model.items() if k.startswith(prefix)}
    if "running_mean_std.running_mean" in model:
        running_mean = model["running_mean_std.running_mean"].to(dtype=torch.float)
        running_std = model["running_mean_std.running_var"].sqrt().to(dtype=torch.float)
    else:
        running_mean = torch.tensor([0.])
        running_std = torch.tensor([1.])
    return policy_net_state_dict, running_mean, running_std

def rescale_action(action, low=-1., high=8.):
    action = action.clamp(-1., 1.)
    return low + (high - low) * (action + 1) / 2

t = lambda arr: torch.tensor(arr, device=device, dtype=torch.float).unsqueeze(0)
a = lambda t: t.detach().cpu().numpy()

# Constants and options
n_sys = 4
m_sys = 2
input_size = 8  # 4 for x, 4 for x_ref
n = 2
m = 64
qp_iter = 10
device = "cuda:0"

# MPC module
mpc_baseline = get_mpc_baseline_parameters("tank", 1)
mpc_baseline["normalize"] = True  # Solve for normalized action, to be consistent with learned QP
mpc_module = QPUnrolledNetwork(
    device, input_size, n, m, qp_iter, None,
    True,
    True,
    mpc_baseline=mpc_baseline,
    use_osqp_for_mpc=True,
)

# Environment
env = env_creators["tank"](
    noise_level=noise_level,
    bs=1,
    max_steps=300,
    keep_stats=True,
    run_name="",
    exp_name="",
    randomize=parametric_uncertainty,
)

# %% Compare learned QPs learned with / without residual loss, and compare degree of constraint violation
def get_qp_net(trained_with_residual_loss, forced_feasibility=False):
    exp_name = f"residual_loss_{'on' if trained_with_residual_loss else 'off'}"
    if forced_feasibility:
        exp_name = "force_feasible_on"
    net = QPUnrolledNetwork(device, input_size, n, m, qp_iter, None, True, True, force_feasible=forced_feasibility)
    if parametric_uncertainty:
        exp_name += "+rand"
    checkpoint_path = f"runs/tank_{exp_name}/nn/tank.pth"
    policy_net_state_dict, running_mean, running_std = get_state_dict(checkpoint_path)
    net.load_state_dict(policy_net_state_dict)
    running_mean, running_std = running_mean.to(device=device), running_std.to(device=device)
    net.to(device)
    return net, running_mean, running_std

def compute_violation(H, action_all, b):
    """
    Number of violated constraints, as well as magnitude of constraint violation.
    """
    z_recovered = bmv(H, action_all) + b
    violation_count = (z_recovered < 0.).sum(dim=-1)
    violation_magnitude = torch.norm(z_recovered.clamp(-torch.inf, 0.), dim=-1)
    return violation_count, violation_magnitude

def rollout(trained_with_residual_loss, is_mpc, steps, forced_feasibility=False):
    net, running_mean, running_std = get_qp_net(trained_with_residual_loss, forced_feasibility)
    if is_mpc:
        net = mpc_module
    results = []
    env.reset(t(x0), t(x_ref), randomize_seed=parameter_randomization_seed)
    x = x0
    obs = make_obs(x, x_ref, running_mean, running_std, not is_mpc)
    for i in range(steps):
        action_all, problem_params = net(obs, return_problem_params=True)
        u = rescale_action(action_all[:, :m_sys])
        raw_obs, reward, done_t, info = env.step(u)
        if not is_mpc:
            obs = (raw_obs - running_mean) / running_std
        else:
            obs = raw_obs
        done = done_t.item()
        P, q, H, b = problem_params
        results.append((P, q, H, b, action_all))
    return results

def evaluate_constraint_violation(trained_with_residual_loss, steps=10, forced_feasibility=False):
    """Rollout for multiple steps, and compute average (number of violated constraints, magnitude of violation)."""
    rollout_results = rollout(trained_with_residual_loss, False, steps, forced_feasibility)
    constraint_violation_indices = []
    for i in range(steps):
        H = rollout_results[i][2]
        action_all = rollout_results[i][4]
        b = rollout_results[i][3]
        constraint_violation_indices.append(compute_violation(H, action_all, b))
    average_violation_count = torch.stack([v[0] for v in constraint_violation_indices], dim=0).to(dtype=torch.float).mean(dim=0)
    average_violation_magnitude = torch.stack([v[1] for v in constraint_violation_indices], dim=0).mean(dim=0)
    return average_violation_count, average_violation_magnitude

violation_count_with_residual_loss, violation_magnitude_with_residual_loss = evaluate_constraint_violation(True)
violation_count_without_residual_loss, violation_magnitude_without_residual_loss = evaluate_constraint_violation(False)
ic(violation_count_with_residual_loss, violation_count_without_residual_loss)
ic(violation_magnitude_with_residual_loss, violation_magnitude_without_residual_loss)

# %% Visualize the feasible set and objective function at a certain step, ignoring constraints that are violated
at_step = 10

def get_violated_mask(H, action_all, b):
    z_recovered = bmv(H, action_all) + b
    return torch.where(z_recovered < 0., torch.ones_like(z_recovered), torch.zeros_like(z_recovered))

def get_step_parameters(at_step, trained_with_residual_loss, is_mpc, forced_feasibility=False):
    rollout_results = rollout(trained_with_residual_loss, is_mpc, at_step, forced_feasibility)
    result_last_step = rollout_results[-1]
    P, q, H, b, action_all = result_last_step
    violated_mask = get_violated_mask(H, action_all, b)
    return P, q, H, b, violated_mask, action_all

def get_plot_parameters(trained_with_residual_loss, is_mpc, color, label, is_forced_feasibility=False):
    a = lambda t: t.squeeze(0).detach().cpu().numpy()
    global P, q, H, b, violated_mask, action_all
    P, q, H, b, violated_mask, action_all = get_step_parameters(at_step, trained_with_residual_loss, is_mpc, is_forced_feasibility)
    if not is_forced_feasibility:
        # Filter out violated constraints
        satisfied_mask = torch.logical_not(violated_mask)
        plot_params = {
            "A": a(-H[satisfied_mask, :]),
            "b": a(b[satisfied_mask]),
            "optimal_solution": a(action_all[:, :m_sys]),
            "P": a(P),
            "q": a(q),
            "color": color,
            "label": label,
        }
    else:
        # Learned problem with forced feasibility; recover original P, q, H, b from augmented P, q, H, b
        y = action_all[:, -1].item()
        P0 = P[:, :n, :n]
        q0 = q[:, :n]
        H0 = H[:, :m, :n]
        b0 = b[:, :m] + y
        plot_params = {
            "A": a(-H0),
            "b": a(b0),
            "optimal_solution": a(action_all[:, :m_sys]),
            "P": a(P0),
            "q": a(q0),
            "color": color,
            "label": label,
        }
    return plot_params

fig, ax = plot_multiple_2d_polytopes_with_contour([
    get_plot_parameters(True, False, "blue", "Learned QP (with residual loss)"),
    get_plot_parameters(False, False, "red", "Learned QP (w/o residual loss)"),
    get_plot_parameters(False, True, "green", "MPC")
])
ax.set_xlabel("$u_1$")
ax.set_ylabel("$u_2$")
ax.set_title(f"Feasible sets and objective functions at step {at_step}")

# %% Visualize the feasible set and objective function at a certain step, forcing feasibility
fig, ax = plot_multiple_2d_polytopes_with_contour([
    get_plot_parameters(True, False, "blue", "Learned QP (forced feasibility, n=2)", True),
    get_plot_parameters(False, True, "green", "MPC (N=1)", True)
])
ax.set_xlabel("$u_1$")
ax.set_ylabel("$u_2$")
ax.set_title(f"Feasible sets and objective functions at step {at_step}")

# %% Visualize feasible set vs. MPC; Now
# 1. The learned QP is guaranteed to be feasible; no need to ignore violated constraints
# 2. The variable are allowed to be high-dimensional; we project the constraint polytope and the quadratic objective to 2D
n = 8
m = 32
mpc_N = 4
at_step = 50
mpc_baseline = get_mpc_baseline_parameters("tank", mpc_N)
mpc_baseline["normalize"] = True  # Solve for normalized action, to be consistent with learned QP
mpc_module = QPUnrolledNetwork(
    device, input_size, n, m, qp_iter, None,
    True,
    True,
    mpc_baseline=mpc_baseline,
    use_osqp_for_mpc=True,
)

def get_plot_parameters_proj(is_mpc, color, label):
    a = lambda t: t.squeeze(0).detach().cpu().numpy()
    P, q, H, b, violated_mask, action_all = get_step_parameters(at_step, False, is_mpc, True)
    if not is_mpc:
        # Learned problem with forced feasibility; recover original P, q, H, b from augmented P, q, H, b
        y = action_all[:, -1].item()
        P0 = P[:, :n, :n]
        q0 = q[:, :n]
        H0 = H[:, :m, :n]
        b0 = b[:, :m] + y
    else:
        P0, q0, H0, b0 = P, q, H, b
    A_proj, b_proj = high_dim_to_2D_sampling(-a(H0), a(b0))
P_proj, q_proj, _ = partial_minimization_2D(a(P0), a(q0))
6
2023-11-28 05:56:22+00:00
16k
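The partial_minimization_2D helper quoted in the context above reduces a higher-dimensional quadratic 0.5 x'Px + q'x to a 2D quadratic via the Schur complement of the trailing block. Below is a small numerical check of that identity; the matrices and the random seed are arbitrary illustrative data, not values from the repository.

import numpy as np

# Arbitrary symmetric positive definite P in R^{4x4} and q in R^4, chosen for illustration.
rng = np.random.default_rng(0)
M = rng.standard_normal((4, 4))
P = M @ M.T + 4 * np.eye(4)
q = rng.standard_normal(4)

# Schur-complement reduction: minimize over the trailing dimensions analytically
P11, P12 = P[:2, :2], P[:2, 2:]
P21, P22 = P[2:, :2], P[2:, 2:]
q1, q2 = q[:2], q[2:]
P_2D = P11 - P12 @ np.linalg.inv(P22) @ P21
q_2D = q1 - P12 @ np.linalg.inv(P22) @ q2
c = -0.5 * q2 @ np.linalg.inv(P22) @ q2

# Check at a random x1: reduced objective equals the full objective at the minimizing x2
x1 = rng.standard_normal(2)
x2_star = -np.linalg.inv(P22) @ (P21 @ x1 + q2)   # minimizer over the trailing dims
x_full = np.concatenate([x1, x2_star])
full_value = 0.5 * x_full @ P @ x_full + q @ x_full
reduced_value = 0.5 * x1 @ P_2D @ x1 + q_2D @ x1 + c
assert np.isclose(full_value, reduced_value)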
Fraunhofer-SCAI/llamol
sample.py
[ { "identifier": "Transformer", "path": "model.py", "snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs, context_params: ContextArgs):\n super().__init__()\n self.params = params\n self.context_params = context_params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)\n\n self.frag_embeddings = nn.Embedding(params.vocab_size, params.dim)\n self.frag_type_embedding = nn.Embedding(1, params.dim)\n\n self.context_lookup = {k: i for i, k in enumerate(context_params.context_keys)}\n self.conditions_type_embeddings = nn.Embedding(\n len(context_params.context_keys), params.dim\n )\n self.conditions_embeddings_lookup = nn.ModuleDict(\n {\n k: nn.Sequential(\n nn.Linear(dim, params.dim, bias=True),\n )\n for k, dim in zip(\n context_params.context_keys, context_params.context_dims\n )\n }\n )\n\n self.dropout = nn.Dropout(params.dropout)\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = nn.Linear(params.dim, params.vocab_size, bias=False)\n\n # share the unembedding parameters with the embedding parameters\n self.tok_embeddings.weight = (\n self.output.weight\n ) # https://paperswithcode.com/method/weight-tying\n\n # some useful precompute for the RoPE relative positional embeddings\n freqs_cos, freqs_sin = precompute_freqs_cis(\n self.params.dim // self.params.n_heads, self.params.max_seq_len\n )\n self.register_buffer(\"freqs_cos\", freqs_cos, persistent=False)\n self.register_buffer(\"freqs_sin\", freqs_sin, persistent=False)\n\n # init all weights\n self.apply(self._init_weights)\n # apply special scaled init to the residual projections, per GPT-2 paper\n for pn, p in self.named_parameters():\n if pn.endswith(\"w3.weight\") or pn.endswith(\"wo.weight\"):\n torch.nn.init.normal_(\n p, mean=0.0, std=0.02 / math.sqrt(2 * params.n_layers)\n )\n\n # Initialize attribute for the loss of the last forward call. 
This will be set if the forward is called with a targets tensor.\n self.last_loss = None\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n\n def forward(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n\n freqs_cos = self.freqs_cos[:seqlen]\n freqs_sin = self.freqs_sin[:seqlen]\n\n for layer in self.layers:\n h = layer(h, freqs_cos, freqs_sin)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n # https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def forward_with_kvcache(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n cache_id: int = 1,\n pos_seq_len: Optional[int] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n if pos_seq_len is None:\n pos_seq_len = seqlen\n else:\n pos_seq_len = max(seqlen, pos_seq_len + context_seq_len)\n\n freqs_cos = self.freqs_cos[:pos_seq_len]\n freqs_sin = self.freqs_sin[:pos_seq_len]\n\n for layer in self.layers:\n h = layer.forward_with_kvcache(h, freqs_cos, freqs_sin, cache_id=cache_id)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n 
# https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def _add_context_to_seq(self, tokens, context, fragment, bsz, device):\n h = self.tok_embeddings(tokens)\n h = self.dropout(h)\n\n if fragment is not None:\n fragment_type_enc = torch.zeros_like(\n fragment, dtype=torch.long, device=device\n )\n\n h = torch.concat(\n (\n self.tok_embeddings(fragment)\n + self.frag_embeddings(fragment)\n + self.frag_type_embedding(fragment_type_enc),\n h,\n ),\n dim=1,\n )\n\n if context is not None and len(context) != 0:\n # context is a dictionary with key : context_tensor of shape (batch_size, context_dim)\n type_ids = []\n context_vals = []\n\n for emb_key, context_val in context.items():\n emb_context_val = self.conditions_embeddings_lookup[emb_key](\n context_val.unsqueeze(1).to(device)\n ).unsqueeze(1)\n\n context_vals.append(emb_context_val)\n type_ids_tensor = torch.tensor(\n [self.context_lookup[emb_key]], device=device, dtype=torch.long\n )\n type_ids.append(type_ids_tensor)\n\n context_types = (\n torch.concat(type_ids, dim=0).reshape(-1, 1).expand(-1, bsz).T\n )\n # shape(len(context),batch_size, emb_size)\n context_types = self.conditions_type_embeddings(context_types)\n\n context_vals = torch.concat(context_vals, dim=1).to(device)\n\n # SHAPE\n h = torch.concat([context_vals + context_types, h], dim=1)\n return h\n\n def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):\n # start with all of the candidate parameters\n param_dict = {pn: p for pn, p in self.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. 
all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n print(\n f\"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters\"\n )\n print(\n f\"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters\"\n )\n # Create AdamW optimizer and use the fused version if it is available\n fused_available = \"fused\" in inspect.signature(torch.optim.AdamW).parameters\n use_fused = fused_available and device_type == \"cuda\"\n extra_args = dict(fused=True) if use_fused else dict()\n optimizer = torch.optim.AdamW(\n optim_groups, lr=learning_rate, betas=betas, **extra_args\n )\n print(f\"using fused AdamW: {use_fused}\")\n\n return optimizer\n\n def estimate_mfu(self, fwdbwd_per_iter, dt):\n \"\"\"estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS\"\"\"\n # first estimate the number of flops we do per iteration.\n # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311\n N = sum(p.numel() for p in self.parameters())\n cfg = self.params\n L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim // cfg.n_heads, cfg.max_seq_len\n flops_per_token = 6 * N + 12 * L * H * Q * T\n flops_per_fwdbwd = flops_per_token * T\n flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter\n # express our flops throughput as ratio of A100 bfloat16 peak flops\n flops_achieved = flops_per_iter * (1.0 / dt) # per second\n flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS\n mfu = flops_achieved / flops_promised\n return mfu\n\n @torch.inference_mode()\n def generate(\n self,\n tokenizer: SmilesTokenizer,\n context: Union[torch.Tensor, None] = None,\n fragments: Union[torch.Tensor, None] = None,\n max_length: int = 50,\n num_gen: int = 200,\n start_smiles: Union[str, None] = None,\n temperature: float = 1.0,\n top_k: Union[int, None] = None,\n device: torch.device = torch.device(\"cpu\"),\n cache_kv: bool = False,\n ) -> List[str]:\n batch_size = num_gen\n if start_smiles is not None:\n tokenized_start_selfie = tokenizer.encode(start_smiles)[\n :-1\n ] # remove <eos> token\n tokenized_start_selfie = torch.tensor(\n tokenized_start_selfie, device=device, dtype=torch.long\n ).view(-1, 1)\n tokenized_start_selfie = tokenized_start_selfie.repeat(1, batch_size)\n\n outputs = tokenized_start_selfie.T\n else:\n outputs = (\n torch.LongTensor([[tokenizer.cls_token_id] * batch_size]).to(device)\n ).T # batch_size\n self.eval()\n\n start_len = outputs.shape[1]\n has_end_idx = np.array([0] * batch_size)\n cache_id = np.random.randint(0, int(1e10), 1).item()\n with torch.no_grad():\n with tqdm(total=max_length, desc=\"Generation\") as pbar:\n for i in range(start_len, max_length):\n # trg_tensor = #torch.LongTensor(outputs).to(model.device)\n if not cache_kv:\n logits = self(outputs, context=context, fragment=fragments)\n else:\n # logits_ = self(outputs, context=context, fragment=fragments)\n if i == start_len:\n # When starting pass the whole input, so that \"start_smiles\" works, then only the newly generated token, because of the cache\n func_input = outputs\n else:\n func_input = outputs[:, 
-1].unsqueeze(-1)\n logits = self.forward_with_kvcache(\n func_input,\n context=context,\n fragment=fragments,\n cache_id=cache_id,\n pos_seq_len=outputs.size(-1),\n )\n\n # raise NotImplementedError(\"Currently not working / right implemented\")\n # logits = self.forward_with_kvcache(outputs, context=context, fragment=fragments,cache_id = cache_id)\n\n logits = logits[:, -1, :] # crop to just the final time step\n if temperature == 0.0:\n # \"sample\" the single most likely index\n _, logits = torch.topk(logits, k=1, dim=-1)\n else:\n # pluck the logits at the final step and scale by desired temperature\n logits = logits / temperature\n # optionally crop the logits to only the top k options\n if top_k is not None:\n v, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n logits[logits < v[:, [-1]]] = -float(\"Inf\")\n\n probs = F.softmax(logits, dim=-1)\n idx_next = torch.multinomial(probs, num_samples=1)\n\n ended_sentences = idx_next == tokenizer.sep_token_id\n if torch.count_nonzero(ended_sentences) != 0:\n indicies = torch.nonzero(ended_sentences)\n indicies = indicies.cpu().numpy()\n for end_idx in indicies[:, 0]:\n if has_end_idx[end_idx] == 0:\n has_end_idx[end_idx] = i\n\n # print(has_end_idx)\n\n if all([idx != 0 for idx in has_end_idx]):\n break\n\n # outputs.append(best_guesses)\n # outputs = torch.row_stack((outputs, idx_next))\n outputs = torch.cat((outputs, idx_next), dim=1)\n pbar.update(1)\n\n out_selfies = []\n for output, end_idx in zip(outputs.cpu().numpy(), has_end_idx):\n # Incase of limiting the max_len\n if end_idx == 0:\n selfie = [tokenizer._convert_id_to_token(idx) for idx in output[:]]\n else:\n selfie = [\n tokenizer._convert_id_to_token(idx) for idx in output[:end_idx]\n ]\n selfie = \"\".join(selfie[1:])\n out_selfies.append(selfie)\n\n # for indicies in outputs:\n # translated_sentence = [tokenizer.idx_to_tokens[idx] for idx in outputs]\n # remove start token\n return out_selfies\n\n @staticmethod\n def load(path, device: torch.device = torch.device(\"cpu\")) -> Transformer:\n data = torch.load(path, map_location=device)\n\n newinstace = Transformer(data[\"model_params\"], data[\"context_params\"])\n newinstace.load_state_dict(data[\"state_dict\"])\n return newinstace.to(device)\n\n def save(self, filepath):\n torch.save(\n {\n \"state_dict\": self.state_dict(),\n **dict(model_params=self.params, context_params=self.context_params),\n },\n filepath,\n )\n\n def getNumberTrainableParams(self) -> int:\n return sum(p.numel() for p in self.parameters() if p.requires_grad)\n\n def getNumberParams(self) -> int:\n return sum(p.numel() for p in self.parameters())" }, { "identifier": "check_metrics", "path": "plot_utils.py", "snippet": "def check_metrics(generated_smiles: List[str], dataset_smiles: List[str]):\n len_before = len(generated_smiles)\n generated_smiles = [g for g in generated_smiles if g is not None]\n len_after = len(generated_smiles)\n\n novel = novelty(generated_smiles, dataset_smiles)\n unique_at_1k = unique_at(generated_smiles, k=1000)\n unique_at_10k = unique_at(generated_smiles, k=10000)\n return dict(\n novelty=novel,\n unique_at_1k=unique_at_1k,\n unique_at_10k=unique_at_10k,\n validity=len_after / float(len_before),\n )" }, { "identifier": "plot_1D_condition", "path": "plot_utils.py", "snippet": "def plot_1D_condition(\n context_col,\n save_path,\n new_context,\n generated_smiles,\n temperature,\n context_dict,\n context_scaler=None,\n):\n for con_col in context_col:\n save_path = os.path.join(\n save_path, 
f\"{con_col}_{'-'.join(context_col)}_temp{temperature}\"\n )\n os.makedirs(save_path, exist_ok=True)\n\n current_context = new_context[con_col].cpu().detach().numpy()\n if con_col == \"mol_weight\":\n predicted_context = calcContextMolWeight(generated_smiles)\n elif con_col == \"logp\":\n predicted_context = calcContextLogP(generated_smiles)\n elif con_col == \"sascore\":\n predicted_context = calcContextSAScore(generated_smiles)\n elif con_col == \"energy\":\n # TODO: Change to something better\n predicted_context = calcContextEnergy(generated_smiles)\n\n if context_scaler is not None:\n raise NotImplementedError(\"Not implemented yet\")\n # context_list = context_scaler.inverse_transform(context_list)\n\n mean_vals_pred = []\n labels = np.unique(current_context)\n mse_value = []\n mad_value = []\n for label in labels:\n mask = (current_context == label).reshape(-1)\n mean_val = np.mean(predicted_context[mask])\n mean_vals_pred.append(mean_val)\n mse_value.extend((predicted_context[mask] - label) ** 2)\n mad_value.extend(abs(predicted_context[mask] - label))\n\n mse = np.mean(mse_value)\n mad = np.mean(mad_value)\n logger.info(f\"MSE {mse}\")\n logger.info(f\"MAD {mad}\")\n logger.info(f\"SD: {np.std(mad_value)}\")\n\n current_context = current_context.reshape(-1)\n\n # Create a figure and axes\n fig, ax1 = plt.subplots()\n\n # Scatter plot\n ax1.scatter(\n current_context,\n predicted_context,\n label=\"Ground Truth vs Prediction\",\n c=\"blue\",\n alpha=0.5,\n )\n ax1.plot(\n np.arange(np.min(current_context), np.max(current_context) + 1),\n np.arange(np.min(current_context), np.max(current_context) + 1),\n label=\"y=x\",\n c=\"black\",\n )\n ax1.scatter(labels, mean_vals_pred, label=\"Mean predicted values\", c=\"red\")\n ax1.set_xlabel(\"Ground Truth\")\n ax1.set_ylabel(\"Prediction\")\n\n # Histogram\n ax2 = ax1.twinx() # Create a twin Axes sharing the x-axis\n sns.histplot(\n context_dict[con_col],\n # bins=200,\n label=\"Dataset distribution\",\n alpha=0.5,\n # kde=True,\n # element=\"poly\",\n ax=ax2,\n )\n # ax2.hist(\n # context_dict[con_col],\n # bins=200,\n # label=\"Dataset distribution\",\n # alpha=0.5,\n # )\n ax2.set_ylabel(\"Frequency\")\n\n # Combine legends\n handles1, labels1 = ax1.get_legend_handles_labels()\n handles2, labels2 = ax2.get_legend_handles_labels()\n\n ax1.legend(handles1 + handles2, labels1 + labels2)\n\n plt.xlim((np.min(current_context), np.max(current_context) + 1))\n # Set title\n display_name = COL_TO_DISPLAY_NAME[con_col]\n plt.title(f\"{display_name} - temperature: {temperature} - mse: {round(mse, 4)}\")\n\n out_df = pd.DataFrame(\n {\n \"smiles\": generated_smiles,\n f\"{con_col}\": predicted_context.tolist(),\n f\"target_{con_col}\": current_context.tolist(),\n }\n )\n out_df.to_csv(os.path.join(save_path, \"predictions.csv\"), index=False)\n out_path = os.path.join(save_path, \"graph.png\")\n print(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()" }, { "identifier": "plot_2D_condition", "path": "plot_utils.py", "snippet": "def plot_2D_condition(\n context_col,\n save_path,\n new_context,\n generated_smiles,\n temperature,\n label: Union[str, None] = None,\n):\n save_path = os.path.join(\n save_path, f\"multicond2_{'-'.join(context_col)}_temp={temperature}\"\n )\n if label is not None:\n save_path = os.path.join(save_path, label)\n\n os.makedirs(save_path, exist_ok=True)\n delta_dict = {c: [] for c in context_col}\n predicted_context_dict = {}\n for con_col in context_col:\n current_context = new_context[con_col].cpu().numpy()\n if 
con_col == \"mol_weight\":\n predicted_context = calcContextMolWeight(generated_smiles)\n elif con_col == \"logp\":\n predicted_context = calcContextLogP(generated_smiles)\n elif con_col == \"sascore\":\n predicted_context = calcContextSAScore(generated_smiles)\n elif con_col == \"energy\":\n # TODO: Change to something better\n predicted_context = calcContextEnergy(generated_smiles)\n\n predicted_context_dict[con_col] = np.array(predicted_context)\n delta_dict[con_col] = np.abs(current_context - np.array(predicted_context))\n\n # Create a DataFrame from delta_dict\n df = pd.DataFrame(delta_dict)\n real_values_prop1 = new_context[context_col[0]].cpu().numpy()\n real_values_prop2 = new_context[context_col[1]].cpu().numpy()\n # cmap = plt.get_cmap('Blues') # Choose a green color palette from Matplotlib\n mse_vals_x = []\n mad_vals_x = []\n mse_vals_y = []\n mad_vals_y = []\n fig = plt.figure()\n ax = plt.subplot(111)\n for v1 in np.unique(real_values_prop1):\n for v2 in np.unique(real_values_prop2):\n mask = (real_values_prop1 == v1) & (real_values_prop2 == v2)\n indices = np.nonzero(mask)[0]\n # print(\"Indices\", len(indices))\n # Get the color from the color palette based on the v1 value\n # color = cmap((v1 - np.min(real_values_prop1)) / (np.max(real_values_prop1) - np.min(real_values_prop1)))\n color = np.random.rand(\n 3,\n )\n # # Plot scatter plot with the specified color and label\n\n x_pred = predicted_context_dict[context_col[0]][indices].ravel()\n y_pred = predicted_context_dict[context_col[1]][indices].ravel()\n mse_vals_x.extend((x_pred - v1) ** 2)\n mad_vals_x.extend(np.abs(x_pred - v1))\n\n mse_vals_y.extend((y_pred - v2) ** 2)\n mad_vals_y.extend(np.abs(y_pred - v2))\n\n ax.scatter(x_pred, y_pred, color=color, alpha=0.5)\n\n # Plot KDE plot with the specified color\n # sns.kdeplot(\n # data=pd.DataFrame(\n # {\n # f\"x\": x_pred,\n # f\"y\": y_pred,\n # }\n # ),\n # x=f\"x\",\n # y=f\"y\",\n # color=color,\n # fill=False,\n # bw_adjust=2.25,\n # # label=f\"({v1}, {v2})\"\n # )\n\n ax.scatter(v1, v2, color=color, label=f\"({v1}, {v2})\", marker=\"^\", s=20.0)\n\n mse_x = np.mean(mse_vals_x)\n mad_x = np.mean(mad_vals_x)\n mse_y = np.mean(mse_vals_y)\n mad_y = np.mean(mad_vals_y)\n\n logger.info(f\"MSE {context_col[0]}: {mse_x}\")\n logger.info(f\"MAD {context_col[0]}: {mad_x}\")\n logger.info(f\"MSE {context_col[1]}: {mse_y}\")\n logger.info(f\"MAD {context_col[1]}: {mad_y}\")\n\n file_path = os.path.join(save_path, \"metrics.txt\")\n\n with open(file_path, \"w\") as f:\n f.write(f\"MSE {context_col[0]}: {mse_x} \\n\")\n f.write(f\"MAD {context_col[0]}: {mad_x} \\n\")\n f.write(f\"MSE {context_col[1]}: {mse_y} \\n\")\n f.write(f\"MAD {context_col[1]}: {mad_y} \\n\")\n\n ax.set_xlabel(COL_TO_DISPLAY_NAME[context_col[0]])\n ax.set_ylabel(COL_TO_DISPLAY_NAME[context_col[1]])\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n # Put a legend to the right of the current axis\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n ax.set_title(\"Multi Property Distribution of Generated Molecules\")\n out_path = os.path.join(save_path, \"graph.png\")\n logger.info(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()\n return save_path" }, { "identifier": "plot_3D_condition", "path": "plot_utils.py", "snippet": "def plot_3D_condition(\n context_col, save_path, new_context, generated_smiles, temperature\n):\n save_path = os.path.join(\n save_path, f\"multicond3_{'-'.join(context_col)}_temp={temperature}\"\n )\n 
os.makedirs(save_path, exist_ok=True)\n predicted_context_dict = {}\n for con_col in context_col:\n predicted_context = calc_context_from_smiles(generated_smiles, con_col)\n\n predicted_context_dict[con_col] = np.array(predicted_context)\n\n real_values_prop1 = new_context[context_col[0]].cpu().numpy()\n real_values_prop2 = new_context[context_col[1]].cpu().numpy()\n real_values_prop3 = new_context[context_col[2]].cpu().numpy()\n # cmap = plt.get_cmap('Blues') # Choose a green color palette from Matplotlib\n\n mse_vals_x = []\n mad_vals_x = []\n mse_vals_y = []\n mad_vals_y = []\n mse_vals_z = []\n mad_vals_z = []\n\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n for v1 in np.unique(real_values_prop1):\n for v2 in np.unique(real_values_prop2):\n for v3 in np.unique(real_values_prop3):\n mask = (\n (real_values_prop1 == v1)\n & (real_values_prop2 == v2)\n & (real_values_prop3 == v3)\n )\n indices = np.nonzero(mask)[0]\n # print(\"Indices\", len(indices))\n # Get the color from the color palette based on the v1 value\n # color = cmap((v1 - np.min(real_values_prop1)) / (np.max(real_values_prop1) - np.min(real_values_prop1)))\n color = np.random.rand(\n 3,\n )\n\n x_pred = predicted_context_dict[context_col[0]][indices].ravel()\n y_pred = predicted_context_dict[context_col[1]][indices].ravel()\n z_pred = predicted_context_dict[context_col[2]][indices].ravel()\n\n mse_vals_x.extend((x_pred - v1) ** 2)\n mad_vals_x.extend(np.abs(x_pred - v1))\n\n mse_vals_y.extend((y_pred - v2) ** 2)\n mad_vals_y.extend(np.abs(y_pred - v2))\n\n mse_vals_z.extend((z_pred - v3) ** 2)\n mad_vals_z.extend(np.abs(z_pred - v3))\n\n # # Plot scatter plot with the specified color and label\n ax.scatter(v1, v2, v3, color=color, label=f\"({v1}, {v2}, {v3})\", s=20.0)\n ax.scatter(\n x_pred,\n y_pred,\n z_pred,\n color=color,\n )\n\n mse_x = np.mean(mse_vals_x)\n mad_x = np.mean(mad_vals_x)\n mse_y = np.mean(mse_vals_y)\n mad_y = np.mean(mad_vals_y)\n mse_z = np.mean(mse_vals_z)\n mad_z = np.mean(mad_vals_z)\n\n logger.info(f\"MSE {context_col[0]}: {mse_x}\")\n logger.info(f\"MAD {context_col[0]}: {mad_x}\")\n logger.info(f\"MSE {context_col[1]}: {mse_y}\")\n logger.info(f\"MAD {context_col[1]}: {mad_y}\")\n logger.info(f\"MSE {context_col[2]}: {mse_z}\")\n logger.info(f\"MAD {context_col[2]}: {mad_z}\")\n\n file_path = os.path.join(save_path, \"metrics.txt\")\n\n with open(file_path, \"w\") as f:\n f.write(f\"MSE {context_col[0]}: {mse_x} \\n\")\n f.write(f\"MAD {context_col[0]}: {mad_x} \\n\")\n\n f.write(f\"MSE {context_col[1]}: {mse_y} \\n\")\n f.write(f\"MAD {context_col[1]}: {mad_y} \\n\")\n\n f.write(f\"MSE {context_col[2]}: {mse_z} \\n\")\n f.write(f\"MAD {context_col[2]}: {mad_z} \\n\")\n\n ax.set_xlabel(COL_TO_DISPLAY_NAME[context_col[0]])\n ax.set_ylabel(COL_TO_DISPLAY_NAME[context_col[1]])\n ax.set_zlabel(COL_TO_DISPLAY_NAME[context_col[2]])\n # plt.legend(\n # bbox_to_anchor=(1.0, 0.5),\n # loc=\"center right\",\n # bbox_transform=plt.gcf().transFigure,\n # )\n # plt.subplots_adjust(left=0.05, bottom=0.1, right=0.8)\n plt.legend(\n bbox_to_anchor=(1.035, 0.5),\n loc=\"center right\",\n bbox_transform=plt.gcf().transFigure,\n )\n plt.subplots_adjust(left=0.05, bottom=0.1, right=0.775)\n\n plt.title(\"Multi Property Distribution of Generated Molecules\")\n out_path = os.path.join(save_path, \"graph.png\")\n print(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()\n\n return save_path" }, { "identifier": "plot_unconditional", "path": "plot_utils.py", "snippet": "def 
plot_unconditional(\n out_path: str = os.getcwd(),\n smiles: List[str] = [],\n temperature: float = 0.8,\n cmp_context_dict: Union[Dict[str, np.array], None] = None,\n context_cols: List[str] = [\"logp\", \"sascore\", \"mol_weight\"],\n):\n out_path = os.path.join(out_path, \"unconditional\")\n os.makedirs(out_path, exist_ok=True)\n\n for c in context_cols:\n plt.clf()\n\n context_cal = calc_context_from_smiles(smiles, c)\n\n if cmp_context_dict is not None:\n sns.histplot(\n cmp_context_dict[c],\n stat=\"density\",\n label=\"Dataset Distribution\",\n alpha=0.75,\n color=\"blue\",\n )\n sns.histplot(\n context_cal,\n stat=\"density\",\n label=\"Generated Molecules Distribution\",\n alpha=0.5,\n color=\"orange\",\n )\n\n if c == \"logp\":\n plt.xlim((-6, 8))\n else:\n plt.xlim((0, 10))\n\n plt.xlabel(COL_TO_DISPLAY_NAME[c])\n plt.title(\n f\"Unconditional Distribution {COL_TO_DISPLAY_NAME[c]} \\nwith Temperature {temperature}\"\n )\n plt.legend()\n\n out_file = os.path.join(out_path, f\"unc_{c}_temp={temperature}.png\")\n plt.savefig(out_file)\n logger.info(f\"Saved Unconditional to {out_file}\")" }, { "identifier": "SmilesTokenizer", "path": "tokenizer.py", "snippet": "class SmilesTokenizer(BertTokenizer):\n \"\"\"\n Creates the SmilesTokenizer class. The tokenizer heavily inherits from the BertTokenizer\n implementation found in Huggingface's transformers library. It runs a WordPiece tokenization\n algorithm over SMILES strings using the tokenisation SMILES regex developed by Schwaller et. al.\n\n Please see https://github.com/huggingface/transformers\n and https://github.com/rxn4chemistry/rxnfp for more details.\n\n Examples\n --------\n >>> from deepchem.feat.smiles_tokenizer import SmilesTokenizer\n >>> current_dir = os.path.dirname(os.path.realpath(__file__))\n >>> vocab_path = os.path.join(current_dir, 'tests/data', 'vocab.txt')\n >>> tokenizer = SmilesTokenizer(vocab_path)\n >>> print(tokenizer.encode(\"CC(=O)OC1=CC=CC=C1C(=O)O\"))\n [12, 16, 16, 17, 22, 19, 18, 19, 16, 20, 22, 16, 16, 22, 16, 16, 22, 16, 20, 16, 17, 22, 19, 18, 19, 13]\n\n\n References\n ----------\n .. [1] Schwaller, Philippe; Probst, Daniel; Vaucher, Alain C.; Nair, Vishnu H; Kreutter, David;\n Laino, Teodoro; et al. (2019): Mapping the Space of Chemical Reactions using Attention-Based Neural\n Networks. ChemRxiv. Preprint. 
https://doi.org/10.26434/chemrxiv.9897365.v3\n\n Notes\n ----\n This class requires huggingface's transformers and tokenizers libraries to be installed.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n\n def __init__(\n self,\n # unk_token=\"[UNK]\",\n # sep_token=\"[SEP]\",\n # pad_token=\"[PAD]\",\n # cls_token=\"[CLS]\",\n # mask_token=\"[MASK]\",\n **kwargs\n ):\n \"\"\"Constructs a SmilesTokenizer.\n\n Parameters\n ----------\n vocab_file: str\n Path to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n \"\"\"\n\n vocab_file = os.path.join(os.path.dirname(__file__), \"data\", \"vocab.txt\")\n\n super().__init__(vocab_file, **kwargs)\n\n self.sos = \"[SOS]\"\n self.eos = \"[EOS]\"\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\"Can't find a vocab file at path '{}'.\".format(vocab_file))\n self.vocab = load_vocab(vocab_file)\n self.highest_unused_index = max(\n [i for i, v in enumerate(self.vocab.keys()) if v.startswith(\"[unused\")]\n )\n self.ids_to_tokens = collections.OrderedDict(\n [(ids, tok) for tok, ids in self.vocab.items()]\n )\n self.basic_tokenizer = BasicSmilesTokenizer()\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n @property\n def vocab_list(self):\n return list(self.vocab.keys())\n\n def _tokenize(self, text: str):\n \"\"\"\n Tokenize a string into a list of tokens.\n\n Parameters\n ----------\n text: str\n Input string sequence to be tokenized.\n \"\"\"\n\n split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"\n Converts a token (str/unicode) in an id using the vocab.\n\n Parameters\n ----------\n token: str\n String token from a larger sequence to be converted to a numerical id.\n \"\"\"\n\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"\n Converts an index (integer) in a token (string/unicode) using the vocab.\n\n Parameters\n ----------\n index: int\n Integer index to be converted back to a string-based token as part of a larger sequence.\n \"\"\"\n\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: List[str]):\n \"\"\"Converts a sequence of tokens (string) in a single string.\n\n Parameters\n ----------\n tokens: List[str]\n List of tokens for a given string sequence.\n\n Returns\n -------\n out_string: str\n Single string from combined tokens.\n \"\"\"\n\n out_string: str = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def add_special_tokens_ids_single_sequence(self, token_ids: List[int]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n\n token_ids: list[int]\n list of tokenized input ids. 
Can be obtained using the encode or encode_plus methods.\n \"\"\"\n\n return [self.cls_token_id] + token_ids + [self.sep_token_id]\n\n def add_special_tokens_single_sequence(self, tokens: List[str]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n tokens: List[str]\n List of tokens for a given string sequence.\n\n \"\"\"\n return [self.cls_token] + tokens + [self.sep_token]\n\n def add_special_tokens_ids_sequence_pair(\n self, token_ids_0: List[int], token_ids_1: List[int]\n ) -> List[int]:\n \"\"\"\n Adds special tokens to a sequence pair for sequence classification tasks.\n A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]\n\n Parameters\n ----------\n token_ids_0: List[int]\n List of ids for the first string sequence in the sequence pair (A).\n\n token_ids_1: List[int]\n List of tokens for the second string sequence in the sequence pair (B).\n \"\"\"\n\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def add_padding_tokens(\n self, token_ids: List[int], length: int, right: bool = True\n ) -> List[int]:\n \"\"\"\n Adds padding tokens to return a sequence of length max_length.\n By default padding tokens are added to the right of the sequence.\n\n Parameters\n ----------\n token_ids: list[int]\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n\n length: int\n\n right: bool (True by default)\n\n Returns\n ----------\n token_ids :\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n\n padding: int\n Integer to be added as padding token\n\n \"\"\"\n padding = [self.pad_token_id] * (length - len(token_ids))\n\n if right:\n return token_ids + padding\n else:\n return padding + token_ids\n\n def save_vocabulary(\n self, vocab_path: str\n ): # -> tuple[str]: doctest issue raised with this return type annotation\n \"\"\"\n Save the tokenizer vocabulary to a file.\n\n Parameters\n ----------\n vocab_path: obj: str\n The directory in which to save the SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n Returns\n ----------\n vocab_file: :obj:`Tuple(str)`:\n Paths to the files saved.\n typle with string to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n \"\"\"\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES[\"vocab_file\"])\n else:\n vocab_file = vocab_path\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(\n vocab_file\n )\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)" } ]
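The Transformer.generate snippet above trims the logits to the final time step, applies temperature scaling and an optional top-k cutoff, and then samples the next token id with torch.multinomial. Below is a minimal standalone sketch of that sampling step; the function name, tensor shapes and example values are illustrative and not taken from the repository.

import torch
import torch.nn.functional as F
from typing import Optional

def sample_next_token(logits: torch.Tensor, temperature: float = 1.0, top_k: Optional[int] = None) -> torch.Tensor:
    # logits: (batch, vocab) scores for the final time step
    if temperature == 0.0:
        # "sample" the single most likely index (greedy decoding)
        return torch.argmax(logits, dim=-1, keepdim=True)
    logits = logits / temperature  # <1 sharpens the distribution, >1 flattens it
    if top_k is not None:
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        logits[logits < v[:, [-1]]] = -float("inf")  # mask everything outside the top k
    probs = F.softmax(logits, dim=-1)
    return torch.multinomial(probs, num_samples=1)  # (batch, 1) sampled token ids

next_ids = sample_next_token(torch.randn(4, 591), temperature=0.8, top_k=25)  # toy batch and vocabulary size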
import os import sys import time import pandas as pd import torch import numpy as np import re import logging import argparse import rdkit.rdBase as rkrb import rdkit.RDLogger as rkl from contextlib import nullcontext from tqdm.auto import tqdm from model import Transformer from plot_utils import ( check_metrics, plot_1D_condition, plot_2D_condition, plot_3D_condition, plot_unconditional, ) from tokenizer import SmilesTokenizer from typing import Dict, List, Tuple, Union from rdkit import Chem from rdkit import DataStructs from rdkit.Chem.Fingerprints import FingerprintMols
13,302
gens_per_step = num_samples // total_gen_steps logger.debug(f"Gens per Step: {gens_per_step}") context = None # {"context": None, "fragment" : None} out_smiles = [] with tqdm(total=total_gen_steps, desc="Batch") as pbar: for i in range(total_gen_steps): if isinstance(context_cols, dict): # TODO: Test if same length cd = { c: context_cols[c][ i * gens_per_step : (i + 1) * gens_per_step ] for c in context_cols.keys() } context_dict = {"context": cd, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[ 1:-1 ] context_tensor = torch.tensor( [incorporate_selfie] * gens_per_step, dtype=torch.long, device=self.device, ) context_dict["fragment"] = context_tensor context_cols = list(context_cols.keys()) else: context_dict = self.get_context( context_cols, context_smi, num_examples=gens_per_step ) # for k in range(num_samples): y = self.model.generate( self.tokenizer, context=context_dict["context"], fragments=context_dict["fragment"], start_smiles=start_smiles, num_gen=gens_per_step, temperature=temperature, top_k=top_k, max_length=max_new_tokens, device=self.device, cache_kv=use_kv_cache, ) new_context = {k: [] for k in context_dict["context"]} for i, sample in enumerate(y): # print(sample) mol = Chem.MolFromSmiles(sample) if mol is not None: out_smiles.append(sample) for k in new_context: new_context[k].append( context_dict["context"][k][i].unsqueeze(-1) ) for k in new_context: new_context[k] = torch.concat(new_context[k], dim=0) if context is None: context = new_context else: for k in context: context[k] = torch.concat( [context[k], new_context[k]], dim=0 ) pbar.update(1) logger.info( f"Number valid generated: {len(out_smiles) / num_samples * 100} %" ) logger.info("---------------") if return_context: return (out_smiles, context) else: return out_smiles @torch.no_grad() def generate_with_evaluation( self, context_cols: Union[List[str], None] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, cmp_context_dict: Union[Dict[str, torch.Tensor], None] = None, total_gen_steps: int = 1, use_kv_cache: bool = False, ): out_smiles, new_context = self.generate( context_cols=context_cols, context_smi=context_smi, start_smiles=start_smiles, num_samples=num_samples, max_new_tokens=max_new_tokens, temperature=temperature, top_k=top_k, return_context=True, total_gen_steps=total_gen_steps, use_kv_cache=use_kv_cache, ) out_dir = os.path.dirname(self.load_path) if context_cols is not None: if len(context_cols) == 1:
# from tqdm.notebook import tqdm logger = logging.getLogger(__name__) class Sampler: def __init__( self, load_path: str, device: str = "cpu", seed: int = 1337, dtype: str = "float16", compile: bool = True, quantize: bool = False, ) -> None: self.load_path = load_path self.device = device self.dtype = dtype self.compile = compile self.quantize = quantize self.seed = seed self._init_model() def _init_model(self): np.random.seed(self.seed) torch.cuda.manual_seed(self.seed) torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn self.device_type = ( "cuda" if "cuda" in self.device else "cpu" ) # for later use in torch.autocast ptdtype = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, }[self.dtype] self.ptdtype = ptdtype self.ctx = self._autocast() # init from a model saved in a specific directory # ckpt_path = os.path.join(out_dir, "ckpt_full_dim=256.pt") self.model = Transformer.load(self.load_path, device=self.device) self.model.eval() if self.quantize: raise NotImplementedError("Not properly implemented for CPU / GPU") self.model = torch.ao.quantization.quantize_dynamic( self.model, # the original model {torch.nn.Linear}, # a set of layers to dynamically quantize dtype=torch.qint8, ) if self.compile: logger.info("Compiling the model...") self.model = torch.compile(self.model) # requires PyTorch 2.0 (optional) self.model = self.model.to(self.device) # load the tokenizer self.tokenizer = SmilesTokenizer() def get_context( self, context_col: List[str], context_smi: str, num_examples: int = 50, ): """ Returns a dictionary in the form of { "fragment": torch.tensor, "context": { "logp": torch.tensor, "sascore": torch.tensor, "mol_weight": torch.tensor } } When context_smi is set to a string, then the "fragment" field is populated. All of the properties listed in the context_col list is set to the keys and the values are set to a resonable range for each property. num_examples indicates how many values are sampled for each property. 
""" output_dict = {"context": {}, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[1:-1] context = torch.tensor( [incorporate_selfie] * num_examples, dtype=torch.long, device=self.device, ) output_dict["fragment"] = context if context_col is None: return output_dict if "logp" in context_col: # context = 0.5 * torch.randint( # -8, 14, (num_examples,), device=self.device, dtype=torch.float # ) # context = 0.5 * torch.randint( # -6, 6, (num_examples, 1), device=device, dtype=torch.float # ) context = torch.tensor( np.random.choice([-2, 0, 2], (num_examples,)), device=self.device, dtype=self.ptdtype, ) # context = 2.0 * torch.ones( # (num_examples,1), device=device, dtype=torch.float # ) # context = -2.0*torch.ones((num_examples,2),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["logp"] = context if "energy" in context_col: context = 0.1 * torch.randint( -15, 15, (num_examples,), device=self.device, dtype=torch.float ) # context = -2.0*torch.ones((num_examples,2),device=device,dtype=torch.float) context, _ = torch.sort(context, 0) output_dict["context"]["energy"] = context if "sascore" in context_col: # context = 0.5 * torch.randint( # 2, 20, (num_examples, ), device=self.device, dtype=torch.float # ) context = torch.tensor( np.random.choice([2, 3, 4], (num_examples,)), device=self.device, dtype=torch.float, ) # context = 0.5 * torch.randint( # 4, 8, (num_examples, 1), device=device, dtype=torch.float # ) # context = 2.0*torch.ones((num_examples,1),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["sascore"] = context if "mol_weight" in context_col: # context = 0.5 * torch.randint( # 2, 20, (num_examples,), device=self.device, dtype=torch.float # ) context = torch.tensor( np.random.choice([2.0, 3.0, 4.0], (num_examples,)), device=self.device, dtype=torch.float, ) # context = 0.5 * torch.randint( # 2, 20, (num_examples, 1), device=device, dtype=torch.float # ) # context = 2.5*torch.ones((num_examples,1),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["mol_weight"] = context return output_dict def _autocast(self): if "cuda" in self.device: if self.dtype == "bfloat16" and torch.cuda.is_bf16_supported(): return torch.cuda.amp.autocast(dtype=torch.bfloat16) elif self.dtype == "float16": return torch.cuda.amp.autocast(dtype=torch.float16) else: return torch.cuda.amp.autocast(dtype=torch.float32) else: # cpu return nullcontext() @torch.no_grad() def generate( self, context_cols: Union[List[str], None, Dict[str, torch.Tensor]] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, return_context: bool = False, total_gen_steps: int = 1, use_kv_cache: bool = False, ) -> Union[List[str], Tuple[List[str], List[float]]]: """ Generates a list of SMILES. With the default options it would generate them unconditionally. Params: - context_cols : When a list the context is randomly sampled from the get_context method, when given a dictionary the context values are taken from the dictionary instead. - context_smi : Further conditioning by the usage of a molecular fragment . 
start_smiles : Can be used to start the SMILES with a specific string, the model then generates the next tokens including that start sequence. - num_samples : Controlls how many SMILES in total will be generated be the model. - max_new_tokens : Controlls the maximum length of each SMILES (in tokens) that is generated. - temperature: Controlls the randomness of the model. A temperature = 1.0 means it is the trained distribution. A temperature < 1 is more deterministic and temperature > 1 is more random - top_k : Clamps the probability distribution to the top k tokens. From these the next token is then sampled from. - return_context : Whether the context that was given to the model should be returned. - total_gen_steps : In how many sub steps the generation should be split up to. Useful when generation 10k + SMILES and wanting to chunk these into for example 10 * 1k generations with total_gen_steps = 10. - use_kv_cache: Runs the generation using kv-caching. It is faster, but takes more memory. """ with self.ctx: gens_per_step = num_samples // total_gen_steps logger.debug(f"Gens per Step: {gens_per_step}") context = None # {"context": None, "fragment" : None} out_smiles = [] with tqdm(total=total_gen_steps, desc="Batch") as pbar: for i in range(total_gen_steps): if isinstance(context_cols, dict): # TODO: Test if same length cd = { c: context_cols[c][ i * gens_per_step : (i + 1) * gens_per_step ] for c in context_cols.keys() } context_dict = {"context": cd, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[ 1:-1 ] context_tensor = torch.tensor( [incorporate_selfie] * gens_per_step, dtype=torch.long, device=self.device, ) context_dict["fragment"] = context_tensor context_cols = list(context_cols.keys()) else: context_dict = self.get_context( context_cols, context_smi, num_examples=gens_per_step ) # for k in range(num_samples): y = self.model.generate( self.tokenizer, context=context_dict["context"], fragments=context_dict["fragment"], start_smiles=start_smiles, num_gen=gens_per_step, temperature=temperature, top_k=top_k, max_length=max_new_tokens, device=self.device, cache_kv=use_kv_cache, ) new_context = {k: [] for k in context_dict["context"]} for i, sample in enumerate(y): # print(sample) mol = Chem.MolFromSmiles(sample) if mol is not None: out_smiles.append(sample) for k in new_context: new_context[k].append( context_dict["context"][k][i].unsqueeze(-1) ) for k in new_context: new_context[k] = torch.concat(new_context[k], dim=0) if context is None: context = new_context else: for k in context: context[k] = torch.concat( [context[k], new_context[k]], dim=0 ) pbar.update(1) logger.info( f"Number valid generated: {len(out_smiles) / num_samples * 100} %" ) logger.info("---------------") if return_context: return (out_smiles, context) else: return out_smiles @torch.no_grad() def generate_with_evaluation( self, context_cols: Union[List[str], None] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, cmp_context_dict: Union[Dict[str, torch.Tensor], None] = None, total_gen_steps: int = 1, use_kv_cache: bool = False, ): out_smiles, new_context = self.generate( context_cols=context_cols, context_smi=context_smi, start_smiles=start_smiles, num_samples=num_samples, max_new_tokens=max_new_tokens, 
temperature=temperature, top_k=top_k, return_context=True, total_gen_steps=total_gen_steps, use_kv_cache=use_kv_cache, ) out_dir = os.path.dirname(self.load_path) if context_cols is not None: if len(context_cols) == 1:
plot_1D_condition(
2
2023-11-28 09:50:31+00:00
16k
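For orientation, the block below sketches how the Sampler class shown in the code above might be driven end to end; the checkpoint path, sample counts, temperature and property columns are placeholder choices, not values taken from this dataset row.

import torch

# Hypothetical checkpoint path; Sampler loads it via Transformer.load internally.
sampler = Sampler(
    load_path="out/ckpt.pt",
    device="cuda" if torch.cuda.is_available() else "cpu",
    dtype="float16",
    compile=False,  # torch.compile needs PyTorch 2.0+, so keep it optional here
)

# Unconditional generation: no property columns and no fragment conditioning.
smiles = sampler.generate(num_samples=100, temperature=0.8, total_gen_steps=2)

# Property-conditioned generation: get_context samples target values per column.
smiles_cond, context = sampler.generate(
    context_cols=["logp", "sascore"],
    num_samples=100,
    temperature=0.8,
    top_k=25,
    return_context=True,
)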
lampmerchant/tashrouter
tashrouter/router/router.py
[ { "identifier": "RoutingTable", "path": "tashrouter/router/routing_table.py", "snippet": "class RoutingTable:\n '''A Router's routing table.'''\n \n STATE_GOOD = 1\n STATE_SUS = 2\n STATE_BAD = 3\n STATE_WORST = 4\n \n def __init__(self, router):\n self._router = router\n self._entry_by_network = {}\n self._state_by_entry = {}\n self._lock = Lock()\n \n def __contains__(self, entry):\n with self._lock:\n return True if entry in self._state_by_entry else False\n \n def __iter__(self):\n with self._lock:\n retval = deque(self._state_by_entry.keys())\n yield from retval\n \n def get_by_network(self, network):\n '''Look up and return an entry in this RoutingTable by network number. Returns (entry, is_bad).'''\n with self._lock:\n entry = self._entry_by_network.get(network)\n if entry is None: return None, None\n return entry, True if self._state_by_entry[entry] in (self.STATE_BAD, self.STATE_WORST) else False\n \n def mark_bad(self, network_min, network_max):\n '''If this RoutingTable has an entry with the given network range, mark it bad. Return True if it existed, else False.'''\n with self._lock:\n cur_entries = set(self._entry_by_network.get(network) for network in range(network_min, network_max + 1))\n if len(cur_entries) != 1: return False\n cur_entry = cur_entries.pop() # this is either None or an entry with a coincident range to the new one\n if not cur_entry: return False\n if self._state_by_entry[cur_entry] != self.STATE_WORST: self._state_by_entry[cur_entry] = self.STATE_BAD\n return True\n \n def consider(self, entry):\n '''Consider a new entry for addition to the table. Return True if added, False if not.'''\n \n with self._lock:\n if entry in self._state_by_entry:\n self._state_by_entry[entry] = self.STATE_GOOD\n return True\n cur_entries = set(self._entry_by_network.get(network) for network in range(entry.network_min, entry.network_max + 1))\n if len(cur_entries) != 1: return False # this network range overlaps one that's already defined, can't do anything with it\n cur_entry = cur_entries.pop()\n \n # range currently undefined, add new entry to the table\n if cur_entry is None:\n pass\n # range fully defined by an entry that is either bad or further away, add new entry to the table\n elif cur_entry.distance >= entry.distance or self._state_by_entry[cur_entry] in (self.STATE_BAD, self.STATE_WORST):\n pass\n # range fully defined by an entry representing a route that is now further than we thought, add new entry to the table\n elif (cur_entry.next_network, cur_entry.next_node, cur_entry.port) == (entry.next_network, entry.next_node, entry.port):\n pass\n # range fully defined by a good entry that is closer than the new one, ignore new entry\n else:\n return False\n \n if cur_entry: self._state_by_entry.pop(cur_entry)\n self._state_by_entry[entry] = self.STATE_GOOD\n for network in range(entry.network_min, entry.network_max + 1): self._entry_by_network[network] = entry\n logging.debug('%s adding: %s', str(self._router), str(entry))\n return True\n \n def age(self):\n '''Age the RoutingTableEntries in this RoutingTable.'''\n entries_to_delete = set()\n networks_to_delete = deque()\n with self._lock:\n for entry in set(self._entry_by_network.values()):\n if self._state_by_entry[entry] == self.STATE_WORST:\n logging.debug('%s aging out: %s', str(self._router), str(entry))\n entries_to_delete.add(entry)\n self._state_by_entry.pop(entry)\n try:\n self._router.zone_information_table.remove_networks(entry.network_min, entry.network_max)\n except ValueError as e:\n logging.warning(\"%s 
couldn't remove networks from zone information table: %s\", str(self._router), e.args[0])\n elif self._state_by_entry[entry] == self.STATE_BAD:\n self._state_by_entry[entry] = self.STATE_WORST\n elif self._state_by_entry[entry] == self.STATE_SUS:\n self._state_by_entry[entry] = self.STATE_BAD\n elif self._state_by_entry[entry] == self.STATE_GOOD and entry.distance != 0:\n self._state_by_entry[entry] = self.STATE_SUS\n for network, entry in self._entry_by_network.items():\n if entry in entries_to_delete: networks_to_delete.append(network)\n for network in networks_to_delete: self._entry_by_network.pop(network)\n \n def entries(self):\n '''Yield entries from this RoutingTable along with their badness state.'''\n with self._lock: retval = deque(self._state_by_entry.items())\n for entry, state in retval: yield entry, True if state in (self.STATE_BAD, self.STATE_WORST) else False\n \n def set_port_range(self, port, network_min, network_max):\n '''Set the network range for a given port, unsetting any previous entries in the table that defined it.'''\n entries_to_delete = set()\n networks_to_delete = deque()\n with self._lock:\n for network, entry in self._entry_by_network.items():\n if entry.port is port and entry.distance == 0:\n entries_to_delete.add(entry)\n networks_to_delete.append(network)\n for entry in entries_to_delete:\n logging.debug('%s deleting: %s', str(self._router), str(entry))\n self._state_by_entry.pop(entry)\n try:\n self._router.zone_information_table.remove_networks(entry.network_min, entry.network_max)\n except ValueError as e:\n logging.warning(\"%s couldn't remove networks from zone information table: %s\", str(self._router), e.args[0])\n for network in networks_to_delete: self._entry_by_network.pop(network)\n entry = RoutingTableEntry(extended_network=port.extended_network,\n network_min=network_min,\n network_max=network_max,\n distance=0,\n port=port,\n next_network=0,\n next_node=0)\n logging.debug('%s adding: %s', str(self._router), str(entry))\n for network in range(network_min, network_max + 1): self._entry_by_network[network] = entry\n self._state_by_entry[entry] = self.STATE_GOOD" }, { "identifier": "ZoneInformationTable", "path": "tashrouter/router/zone_information_table.py", "snippet": "class ZoneInformationTable:\n '''Zone Information Table (ZIT).'''\n \n def __init__(self, router):\n self._router = router\n self._network_min_to_network_max = {}\n self._network_min_to_zone_name_set = {}\n self._network_min_to_default_zone_name = {}\n self._zone_name_to_network_min_set = {}\n self._ucased_zone_name_to_zone_name = {}\n self._lock = Lock()\n \n def _check_range(self, network_min, network_max=None):\n looked_up_network_max = self._network_min_to_network_max.get(network_min)\n if network_max is None:\n if looked_up_network_max is None:\n raise ValueError('network range %d-? 
does not exist' % network_min)\n else:\n return looked_up_network_max\n elif looked_up_network_max == network_max: # if network range exists as given\n return network_max\n elif looked_up_network_max is not None:\n raise ValueError('network range %d-%d overlaps %d-%d' % (network_min, network_max, network_min, looked_up_network_max))\n else: # check for overlap\n for existing_min, existing_max in self._network_min_to_network_max.items():\n if existing_min > network_max or existing_max < network_min: continue\n raise ValueError('network range %d-%d overlaps %d-%d' % (network_min, network_max, existing_min, existing_max))\n return None\n \n def add_networks_to_zone(self, zone_name, network_min, network_max=None):\n '''Add a range of networks to a zone, adding the zone if it isn't in the table.'''\n \n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n ucased_zone_name = ucase(zone_name)\n \n with self._lock:\n \n if ucased_zone_name in self._ucased_zone_name_to_zone_name:\n zone_name = self._ucased_zone_name_to_zone_name[ucased_zone_name]\n else:\n self._ucased_zone_name_to_zone_name[ucased_zone_name] = zone_name\n self._zone_name_to_network_min_set[zone_name] = set()\n \n check_range = self._check_range(network_min, network_max)\n if check_range:\n network_max = check_range\n self._network_min_to_zone_name_set[network_min].add(zone_name)\n now_default = False\n else:\n self._network_min_to_network_max[network_min] = network_max\n self._network_min_to_zone_name_set[network_min] = set((zone_name,))\n self._network_min_to_default_zone_name[network_min] = zone_name\n now_default = True\n \n logging.debug('%s adding network range %d-%d to zone %s%s', str(self._router), network_min, network_max,\n zone_name.decode('mac_roman', 'replace'), ' (now default zone for this range)' if now_default else '')\n self._zone_name_to_network_min_set[zone_name].add(network_min)\n \n def remove_networks(self, network_min, network_max=None):\n '''Remove a range of networks from all zones, removing associated zones if now empty of networks.'''\n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n with self._lock:\n network_max = self._check_range(network_min, network_max)\n if not network_max: return\n logging.debug('%s removing network range %d-%d from all zones', str(self._router), network_min, network_max)\n for zone_name in self._network_min_to_zone_name_set[network_min]:\n s = self._zone_name_to_network_min_set[zone_name]\n s.remove(network_min)\n if not s:\n logging.debug('%s removing zone %s because it no longer contains any networks', str(self._router),\n zone_name.decode('mac_roman', 'replace'))\n self._zone_name_to_network_min_set.pop(zone_name)\n self._ucased_zone_name_to_zone_name.pop(ucase(zone_name))\n self._network_min_to_default_zone_name.pop(network_min)\n self._network_min_to_zone_name_set.pop(network_min)\n self._network_min_to_network_max.pop(network_min)\n \n def zones(self):\n '''Return the zones in this ZIT.'''\n with self._lock:\n return list(self._zone_name_to_network_min_set.keys())\n \n def zones_in_network_range(self, network_min, network_max=None):\n '''Return a deque containing the names of all zones in the given range of networks, default zone name first.'''\n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n with self._lock:\n if not self._check_range(network_min, 
network_max): return deque()\n default_zone_name = self._network_min_to_default_zone_name[network_min]\n retval = deque(zone_name for zone_name in self._network_min_to_zone_name_set[network_min] if zone_name != default_zone_name)\n retval.appendleft(default_zone_name)\n return retval\n \n def networks_in_zone(self, zone_name):\n '''Return a deque containing the network numbers of all networks in the given zone.'''\n with self._lock:\n zone_name = self._ucased_zone_name_to_zone_name.get(ucase(zone_name))\n if zone_name is None: return deque()\n retval = deque()\n for network_min in self._zone_name_to_network_min_set[zone_name]:\n retval.extend(range(network_min, self._network_min_to_network_max[network_min] + 1))\n return retval" }, { "identifier": "Datagram", "path": "tashrouter/datagram.py", "snippet": "class Datagram:\n '''DDP datagram.'''\n \n MAX_DATA_LENGTH = 586\n \n hop_count: int\n destination_network: int\n source_network: int\n destination_node: int\n source_node: int\n destination_socket: int\n source_socket: int\n ddp_type: int\n data: bytes\n \n @classmethod\n def from_long_header_bytes(cls, data):\n '''Construct a Datagram object from bytes in the long-header format and raise ValueErrors if there are issues.'''\n if len(data) < 13: raise ValueError('data too short, must be at least 13 bytes for long-header DDP datagram')\n (first, second, checksum, destination_network, source_network, destination_node, source_node, destination_socket, source_socket,\n ddp_type) = struct.unpack('>BBHHHBBBBB', data[:13])\n if first & 0xC0: raise ValueError('invalid long DDP header, top two bits of first byte must be zeroes')\n hop_count = (first & 0x3C) >> 2\n length = (first & 0x3) << 8 | second\n if length > 13 + cls.MAX_DATA_LENGTH:\n raise ValueError('invalid long DDP header, length %d is greater than %d' % (length, cls.MAX_DATA_LENGTH))\n if length != len(data):\n raise ValueError('invalid long DDP header, length field says %d but actual length is %d' % (length, len(data)))\n if checksum != 0:\n calc_checksum = ddp_checksum(data[4:])\n if calc_checksum != checksum:\n raise ValueError('invalid long DDP header, checksum is 0x%04X but should be 0x%04X' % (checksum, calc_checksum))\n return cls(hop_count=hop_count,\n destination_network=destination_network,\n source_network=source_network,\n destination_node=destination_node,\n source_node=source_node,\n destination_socket=destination_socket,\n source_socket=source_socket,\n ddp_type=ddp_type,\n data=data[13:])\n \n @classmethod\n def from_short_header_bytes(cls, destination_node, source_node, data):\n '''Construct a Datagram object from bytes in the short-header format and raise ValueErrors if there are issues.'''\n if len(data) < 5: raise ValueError('data too short, must be at least 5 bytes for short-header DDP datagram')\n first, second, destination_socket, source_socket, ddp_type = struct.unpack('>BBBBB', data[0:5])\n if first & 0xFC: raise ValueError('invalid short DDP header, top six bits of first byte must be zeroes')\n length = (first & 0x3) << 8 | second\n if length > 5 + cls.MAX_DATA_LENGTH:\n raise ValueError('invalid short DDP header, length %d is greater than %d' % (length, cls.MAX_DATA_LENGTH))\n if length != len(data):\n raise ValueError('invalid short DDP header, length field says %d but actual length is %d' % (length, len(data)))\n return cls(hop_count=0,\n destination_network=0,\n source_network=0,\n destination_node=destination_node,\n source_node=source_node,\n destination_socket=destination_socket,\n 
source_socket=source_socket,\n ddp_type=ddp_type,\n data=data[5:])\n \n def _check_ranges(self):\n '''Check that the Datagram's parameters are in range, raise ValueError if not.'''\n for name, min_value, max_value in (('hop count', 0, 15),\n ('destination network', 0, 65534),\n ('source network', 0, 65534),\n ('destination node', 0, 255),\n ('source node', 1, 254),\n ('destination socket', 0, 255),\n ('source socket', 0, 255),\n ('DDP type', 0, 255)):\n value = getattr(self, name.lower().replace(' ', '_'))\n if not min_value <= value <= max_value:\n raise ValueError('invalid %s %d, must be in range %d-%d' % (name, value, min_value, max_value))\n \n def as_long_header_bytes(self):\n '''Return this Datagram in long-header format as bytes and raise ValueErrors if there are issues.'''\n self._check_ranges()\n if len(self.data) > self.MAX_DATA_LENGTH:\n raise ValueError('data length %d is greater than max length %d' % (len(self.data), self.MAX_DATA_LENGTH))\n header = struct.pack('>HHBBBBB',\n self.destination_network,\n self.source_network,\n self.destination_node,\n self.source_node,\n self.destination_socket,\n self.source_socket,\n self.ddp_type)\n data = header + self.data\n length = 4 + len(data)\n checksum = 0\n for byte in data:\n checksum += byte\n checksum = (checksum & 0x7FFF) << 1 | (1 if checksum & 0x8000 else 0)\n checksum = checksum or 0xFFFF # because a zero value in the checksum field means one was not calculated\n header = struct.pack('>BBH',\n (self.hop_count & 0xF) << 2 | (length & 0x300) >> 8,\n length & 0xFF,\n checksum)\n return header + data\n \n def as_short_header_bytes(self):\n '''Return this Datagram in short-header format as bytes and raise ValueErrors if there are issues.'''\n if self.hop_count > 0:\n raise ValueError('invalid hop count %d, short-header datagrams may not have non-zero hop count' % self.hop_count)\n self._check_ranges()\n if len(self.data) > self.MAX_DATA_LENGTH:\n raise ValueError('data length %d is greater than max length %d' % (len(self.data), self.MAX_DATA_LENGTH))\n length = 5 + len(self.data)\n header = struct.pack('>BBBBB',\n (length & 0x300) >> 8,\n length & 0xFF,\n self.destination_socket,\n self.source_socket,\n self.ddp_type)\n return header + self.data\n \n def copy(self, **kwargs):\n '''Return a copy of this Datagram, replacing params specified by kwargs, if any.'''\n return dataclasses.replace(self, **kwargs)\n \n def hop(self):\n '''Return a copy of this Datagram with the hop count incremented by one.'''\n return self.copy(hop_count=self.hop_count + 1)" }, { "identifier": "EchoService", "path": "tashrouter/service/echo.py", "snippet": "class EchoService(Service):\n '''A Service which implements AppleTalk Echo Protocol (AEP).'''\n \n ECHO_SAS = 4\n ECHO_DDP_TYPE = 4\n \n ECHO_FUNC_REQUEST_BYTE = b'\\x01'\n ECHO_FUNC_REPLY_BYTE = b'\\x02'\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n if datagram.ddp_type != self.ECHO_DDP_TYPE: continue\n if not datagram.data: continue\n if datagram.data[0:1] != self.ECHO_FUNC_REQUEST_BYTE: continue\n 
router.reply(datagram, rx_port, self.ECHO_DDP_TYPE, self.ECHO_FUNC_REPLY_BYTE + datagram.data[1:])\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "NameInformationService", "path": "tashrouter/service/name_information.py", "snippet": "class NameInformationService(Service):\n '''A Service that implements Name Binding Protocol (NBP).'''\n \n NBP_SAS = 2\n NBP_DDP_TYPE = 2\n \n NBP_CTRL_BRRQ = 1\n NBP_CTRL_LKUP = 2\n NBP_CTRL_LKUP_REPLY = 3\n NBP_CTRL_FWDREQ = 4\n \n MAX_FIELD_LEN = 32\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _run(self, router):\n \n self.started_event.set()\n \n while True:\n \n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n \n if datagram.ddp_type != self.NBP_DDP_TYPE: continue\n if len(datagram.data) < 12: continue\n func_tuple_count, nbp_id, req_network, req_node, req_socket, _, object_field = struct.unpack('>BBHBBBB', datagram.data[:8])\n func = func_tuple_count >> 4\n tuple_count = func_tuple_count & 0xF\n if tuple_count != 1 or func not in (self.NBP_CTRL_BRRQ, self.NBP_CTRL_FWDREQ): continue\n if object_field < 1 or object_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 8 + object_field: continue\n type_field = datagram.data[8 + object_field]\n if type_field < 1 or type_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 9 + object_field + type_field: continue\n zone_field = datagram.data[9 + object_field + type_field]\n if zone_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 10 + object_field + type_field + zone_field: continue\n zone_field = datagram.data[10 + object_field + type_field:10 + object_field + type_field + zone_field] or b'*'\n type_field = datagram.data[9 + object_field:9 + object_field + type_field]\n object_field = datagram.data[8:8 + object_field]\n \n common_data = b''.join((struct.pack('>BHBBBB', nbp_id, req_network, req_node, req_socket, 0, len(object_field)),\n object_field,\n struct.pack('>B', len(type_field)),\n type_field,\n struct.pack('>B', len(zone_field)),\n zone_field))\n lkup_data = struct.pack('>B', (self.NBP_CTRL_LKUP << 4) | 1) + common_data\n fwdreq_data = struct.pack('>B', (self.NBP_CTRL_FWDREQ << 4) | 1) + common_data\n \n if func == self.NBP_CTRL_BRRQ:\n \n # if zone is *, try to sub in the zone name associated with the nonextended network whence the BrRq comes\n if zone_field == b'*':\n if rx_port.extended_network: continue # BrRqs from extended networks must provide zone name\n if rx_port.network:\n entry, _ = router.routing_table.get_by_network(rx_port.network)\n if entry:\n try:\n zones = router.zone_information_table.zones_in_network_range(entry.network_min)\n except ValueError:\n pass\n else:\n if len(zones) == 1: zone_field = zones[0] # there should not be more than one zone\n \n # if zone is still *, just broadcast a LkUp on the requesting network and call it done\n if zone_field == b'*':\n rx_port.send(0x0000, 0xFF, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=rx_port.network,\n destination_node=0xFF,\n source_node=rx_port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n 
ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n # we know the zone, so multicast LkUps to directly-connected networks and send FwdReqs to non-directly-connected ones\n else:\n entries = set(router.routing_table.get_by_network(network)\n for network in router.zone_information_table.networks_in_zone(zone_field))\n entries.discard((None, None))\n for entry, _ in entries:\n if entry.distance == 0:\n entry.port.multicast(zone_field, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=entry.port.network,\n destination_node=0xFF,\n source_node=entry.port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n else:\n router.route(Datagram(hop_count=0,\n destination_network=entry.network_min,\n source_network=0,\n destination_node=0x00,\n source_node=0,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=fwdreq_data))\n \n elif func == self.NBP_CTRL_FWDREQ:\n \n entry, _ = router.routing_table.get_by_network(datagram.destination_network)\n if entry is None or entry.distance != 0: continue # FwdReq thinks we're directly connected to this network but we're not\n entry.port.multicast(zone_field, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=entry.port.network,\n destination_node=0xFF,\n source_node=entry.port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n \n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "RoutingTableAgingService", "path": "tashrouter/service/routing_table_aging.py", "snippet": "class RoutingTableAgingService(Service):\n '''A Service which ages the Router's RoutingTable on a regular basis.'''\n \n DEFAULT_TIMEOUT = 20 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.stop_requested_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.stop_requested_event.set()\n self.stopped_event.wait()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n if self.stop_requested_event.wait(timeout=self.timeout): break\n router.routing_table.age()\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n pass" }, { "identifier": "RtmpRespondingService", "path": "tashrouter/service/rtmp/responding.py", "snippet": "class RtmpRespondingService(Service, RtmpService):\n '''A Service which responds to inbound RTMP Datagrams and maintains the Router's RoutingTable.'''\n \n def __init__(self):\n self.thread = None\n self.started_event = Event()\n self.queue = Queue()\n self.stop_flag = object()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.queue.join()\n \n def _run(self, router):\n \n while True:\n \n if self.started_event.is_set():\n self.queue.task_done()\n else:\n self.started_event.set()\n \n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n \n if datagram.ddp_type == self.RTMP_DDP_TYPE_DATA:\n \n # process header\n if len(datagram.data) < 4: continue # invalid, datagram too short\n sender_network, id_length, sender_node = 
struct.unpack('>HBB', datagram.data[0:4])\n if id_length != 8: continue # invalid, AppleTalk node numbers are only 8 bits in length\n data = datagram.data[4:]\n if rx_port.extended_network:\n if len(data) < 6: continue # invalid, datagram too short to contain at least one extended network tuple\n sender_network_min, range_distance, sender_network_max, rtmp_version = struct.unpack('>HBHB', data[0:6])\n if range_distance != 0x80: continue # invalid, first tuple must be the sender's extended network tuple\n else:\n if len(data) < 3: continue\n sender_network_min = sender_network_max = sender_network\n zero, rtmp_version = struct.unpack('>HB', data[0:3])\n if zero != 0: continue # invalid, this word must be zero on a nonextended network\n data = data[3:]\n if rtmp_version != self.RTMP_VERSION: continue # invalid, don't recognize this RTMP format\n \n # interpret tuples\n tuples = deque()\n data_idx = 0\n while True:\n packed = data[data_idx:data_idx + 3]\n if len(packed) != 3: break\n network_min, range_distance = struct.unpack('>HB', packed)\n if range_distance & 0x80:\n extended_network = True\n packed = data[data_idx + 3:data_idx + 6]\n if len(packed) != 3: break\n network_max, _ = struct.unpack('>HB', packed)\n data_idx += 6\n else:\n extended_network = False\n network_max = network_min\n data_idx += 3\n tuples.append((extended_network, network_min, network_max, range_distance & 0x1F))\n if data_idx != len(data): continue # invalid, tuples did not end where expected\n \n # if this Port doesn't know its network range yet, accept that this is from the network's seed router\n if rx_port.network_min == rx_port.network_max == 0: rx_port.set_network_range(sender_network_min, sender_network_max)\n \n # resolve the given tuples with the current RoutingTable\n for extended_network, network_min, network_max, distance in tuples:\n # if the entry is too many hops away or is a notify-neighbor entry, mark any entry we have as bad\n if distance >= 15:\n router.routing_table.mark_bad(network_min, network_max)\n # otherwise have the table consider a new entry based on this tuple\n else:\n router.routing_table.consider(RoutingTableEntry(extended_network=extended_network,\n network_min=network_min,\n network_max=network_max,\n distance=distance + 1,\n port=rx_port,\n next_network=sender_network,\n next_node=sender_node))\n \n elif datagram.ddp_type != self.RTMP_DDP_TYPE_REQUEST or not datagram.data:\n \n continue\n \n elif datagram.data[0] == self.RTMP_FUNC_REQUEST:\n \n if 0 in (rx_port.network_min, rx_port.network_max): continue\n if datagram.hop_count != 0: continue # we have to send responses out of the same port they came in, no routing\n response_data = struct.pack('>HBB', rx_port.network, 8, rx_port.node)\n if rx_port.extended_network:\n response_data += struct.pack('>HBHB', rx_port.network_min, 0x80, rx_port.network_max, self.RTMP_VERSION)\n router.reply(datagram, rx_port, self.RTMP_DDP_TYPE_DATA, response_data)\n \n elif datagram.data[0] in (self.RTMP_FUNC_RDR_SPLIT_HORIZON, self.RTMP_FUNC_RDR_NO_SPLIT_HORIZON):\n \n split_horizon = True if datagram.data[0] == self.RTMP_FUNC_RDR_SPLIT_HORIZON else False\n for datagram_data in self.make_routing_table_datagram_data(router, rx_port, split_horizon):\n router.reply(datagram, rx_port, self.RTMP_DDP_TYPE_DATA, datagram_data)\n \n self.queue.task_done()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "RtmpSendingService", "path": "tashrouter/service/rtmp/sending.py", "snippet": "class 
RtmpSendingService(Service, RtmpService):\n '''A Service which sends RTMP Datagrams containing the Router's RoutingTable to its Ports on a regular basis.'''\n \n DEFAULT_TIMEOUT = 10 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.queue = Queue()\n self.stop_flag = object()\n self.force_send_flag = object()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.queue.join()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n try:\n item = self.queue.get(timeout=self.timeout)\n except Empty:\n item = None\n if item is self.stop_flag: break\n for port in router.ports:\n if 0 in (port.node, port.network): continue\n for datagram_data in self.make_routing_table_datagram_data(router, port):\n port.send(0x0000, 0xFF, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=port.network,\n destination_node=0xFF,\n source_node=port.node,\n destination_socket=self.RTMP_SAS,\n source_socket=self.RTMP_SAS,\n ddp_type=self.RTMP_DDP_TYPE_DATA,\n data=datagram_data))\n if item is not None: self.queue.task_done()\n self.queue.task_done()\n \n def inbound(self, datagram, rx_port):\n pass\n \n def force_send(self):\n '''Force this service to immediately send an RTMP Datagram for testing purposes.'''\n self.queue.put(self.force_send_flag)\n self.queue.join()" }, { "identifier": "ZipRespondingService", "path": "tashrouter/service/zip/responding.py", "snippet": "class ZipRespondingService(Service, ZipService):\n '''A Service that implements Zone Information Protocol (ZIP).'''\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n self._pending_network_zone_name_set = {}\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _reply(self, router, datagram):\n \n if len(datagram.data) < 2: return\n func, count = struct.unpack('>BB', datagram.data[:2])\n data = datagram.data[2:]\n \n networks_and_zone_names = deque()\n while len(data) >= 3:\n network_min, zone_name_length = struct.unpack('>HB', data[:3])\n zone_name = data[3:3 + zone_name_length]\n if len(zone_name) != zone_name_length: break\n data = data[3 + zone_name_length:]\n if zone_name_length == 0: continue\n networks_and_zone_names.append((network_min, zone_name))\n if not networks_and_zone_names: return\n \n network_min_to_network_max = {}\n for entry in router.routing_table:\n network_min_to_network_max[entry.network_min] = entry.network_max\n \n if func == self.ZIP_FUNC_REPLY:\n for network_min, zone_name in networks_and_zone_names:\n try:\n network_max = network_min_to_network_max[network_min]\n except KeyError:\n logging.warning('%s ZIP reply refers to a network range (starting with %d) with which we are not familiar', str(router), \n network_min)\n else:\n try:\n router.zone_information_table.add_networks_to_zone(zone_name, network_min, network_max)\n except ValueError as e:\n logging.warning(\"%s ZIP reply couldn't be added to zone information table: %s\", str(router), e.args[0])\n elif func == self.ZIP_FUNC_EXT_REPLY:\n #TODO this code is fragile and I do not like it\n network_min = None\n for network_min, 
zone_name in networks_and_zone_names:\n if network_min not in self._pending_network_zone_name_set: self._pending_network_zone_name_set[network_min] = set()\n self._pending_network_zone_name_set[network_min].add(zone_name)\n if network_min is not None and len(self._pending_network_zone_name_set.get(network_min, ())) >= count and count >= 1:\n for zone_name in self._pending_network_zone_name_set.pop(network_min):\n try:\n network_max = network_min_to_network_max[network_min]\n except KeyError:\n logging.warning('%s ZIP reply refers to a network range (starting with %d) with which we are not familiar', str(router),\n network_min)\n else:\n try:\n router.zone_information_table.add_networks_to_zone(zone_name, network_min, network_max)\n except ValueError as e:\n logging.warning(\"%s ZIP reply couldn't be added to zone information table: %s\", str(router), e.args[0])\n \n @classmethod\n def _query(cls, router, datagram, rx_port):\n if len(datagram.data) < 4: return\n network_count = datagram.data[1]\n if len(datagram.data) != (network_count * 2) + 2: return\n # in imitation of AppleTalk Internet Router, we only respond with extended replies even if a regular reply would fit\n # we also give one list per requested network even if the requested networks are in the same range and the lists are the same;\n # that is, if the sender requests zones for networks 3 and 4 and there is a zones list for networks 3-5, we will reply with the\n # zone list for network 3 twice... seems silly, but this is how ATIR does it so *shrug*\n for network_idx in range(network_count):\n requested_network = struct.unpack('>H', datagram.data[(network_idx * 2) + 2:(network_idx * 2) + 4])[0]\n entry, _ = router.routing_table.get_by_network(requested_network)\n if entry is None: continue\n try:\n zone_names = router.zone_information_table.zones_in_network_range(entry.network_min)\n except ValueError:\n continue\n datagram_data = deque()\n datagram_data_length = 0\n for zone_name in chain(zone_names, (None,)):\n list_item = None if zone_name is None else struct.pack('>HB', entry.network_min, len(zone_name)) + zone_name\n if list_item is None or datagram_data_length + len(list_item) > Datagram.MAX_DATA_LENGTH - 2:\n router.reply(datagram, rx_port, cls.ZIP_DDP_TYPE, struct.pack('>BB', cls.ZIP_FUNC_EXT_REPLY,\n len(zone_names)) + b''.join(datagram_data))\n datagram_data = deque()\n datagram_data_length = 0\n if list_item is not None:\n datagram_data.append(list_item)\n datagram_data_length += len(list_item)\n \n @classmethod\n def _get_net_info(cls, router, datagram, rx_port):\n if 0 in (rx_port.network, rx_port.network_min, rx_port.network_max): return\n if len(datagram.data) < 7: return\n if datagram.data[1:6] != b'\\0\\0\\0\\0\\0': return\n given_zone_name = datagram.data[7:7 + datagram.data[6]]\n given_zone_name_ucase = ucase(given_zone_name)\n flags = cls.ZIP_GETNETINFO_ZONE_INVALID | cls.ZIP_GETNETINFO_ONLY_ONE_ZONE\n default_zone_name = None\n number_of_zones = 0\n multicast_address = b''\n try:\n zone_names = router.zone_information_table.zones_in_network_range(rx_port.network_min, rx_port.network_max)\n except ValueError as e:\n logging.warning(\"%s couldn't get zone names in port network range for GetNetInfo: %s\", router, e.args[0])\n return\n for zone_name in zone_names:\n number_of_zones += 1\n if default_zone_name is None:\n # zones_in_network_range returns the default zone first\n default_zone_name = zone_name\n multicast_address = rx_port.multicast_address(zone_name)\n if ucase(zone_name) == 
given_zone_name_ucase:\n flags &= ~cls.ZIP_GETNETINFO_ZONE_INVALID\n multicast_address = rx_port.multicast_address(zone_name)\n if number_of_zones > 1:\n flags &= ~cls.ZIP_GETNETINFO_ONLY_ONE_ZONE\n if not flags & cls.ZIP_GETNETINFO_ZONE_INVALID: break\n if number_of_zones == 0: return\n if not multicast_address: flags |= cls.ZIP_GETNETINFO_USE_BROADCAST\n reply_data = b''.join((\n struct.pack('>BBHHB', cls.ZIP_FUNC_GETNETINFO_REPLY, flags, rx_port.network_min, rx_port.network_max, len(given_zone_name)),\n given_zone_name,\n struct.pack('>B', len(multicast_address)),\n multicast_address,\n struct.pack('>B', len(default_zone_name)) if flags & cls.ZIP_GETNETINFO_ZONE_INVALID else b'',\n default_zone_name if flags & cls.ZIP_GETNETINFO_ZONE_INVALID else b''))\n router.reply(datagram, rx_port, cls.ZIP_DDP_TYPE, reply_data)\n \n @classmethod\n def _get_my_zone(cls, router, datagram, rx_port):\n _, _, tid, _, _, start_index = struct.unpack('>BBHBBH', datagram.data)\n if start_index != 0: return\n entry, _ = router.routing_table.get_by_network(datagram.source_network)\n if entry is None: return\n try:\n zone_name = next(iter(router.zone_information_table.zones_in_network_range(entry.network_min)), None)\n except ValueError:\n return\n if not zone_name: return\n router.reply(datagram, rx_port, cls.ATP_DDP_TYPE, struct.pack('>BBHBBHB',\n cls.ATP_FUNC_TRESP | cls.ATP_EOM,\n 0,\n tid,\n 0,\n 0,\n 1,\n len(zone_name)) + zone_name)\n \n @classmethod\n def _get_zone_list(cls, router, datagram, rx_port, local=False):\n _, _, tid, _, _, start_index = struct.unpack('>BBHBBH', datagram.data)\n if local:\n try:\n zone_iter = iter(router.zone_information_table.zones_in_network_range(rx_port.network_min, rx_port.network_max))\n except ValueError as e:\n logging.warning(\"%s couldn't get zone names in port network range for GetLocalZones: %s\", router, e.args[0])\n return\n else:\n zone_iter = iter(router.zone_information_table.zones())\n for _ in range(start_index - 1): next(zone_iter, None) # skip over start_index-1 entries (index is 1-relative)\n last_flag = 0\n zone_list = deque()\n num_zones = 0\n data_length = 8\n while zone_name := next(zone_iter, None):\n if data_length + 1 + len(zone_name) > Datagram.MAX_DATA_LENGTH: break\n zone_list.append(struct.pack('>B', len(zone_name)))\n zone_list.append(zone_name)\n num_zones += 1\n data_length += 1 + len(zone_name)\n else:\n last_flag = 1\n router.reply(datagram, rx_port, cls.ATP_DDP_TYPE, struct.pack('>BBHBBH',\n cls.ATP_FUNC_TRESP | cls.ATP_EOM,\n 0,\n tid,\n last_flag,\n 0,\n num_zones) + b''.join(zone_list))\n \n def _run(self, router):\n self.started_event.set()\n while True:\n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n if datagram.ddp_type == self.ZIP_DDP_TYPE:\n if not datagram.data: continue\n if datagram.data[0] in (self.ZIP_FUNC_REPLY, self.ZIP_FUNC_EXT_REPLY):\n self._reply(router, datagram)\n elif datagram.data[0] == self.ZIP_FUNC_QUERY:\n self._query(router, datagram, rx_port)\n elif datagram.data[0] == self.ZIP_FUNC_GETNETINFO_REQUEST:\n self._get_net_info(router, datagram, rx_port)\n elif datagram.ddp_type == self.ATP_DDP_TYPE:\n if len(datagram.data) != 8: continue\n control, bitmap, _, func, zero, _ = struct.unpack('>BBHBBH', datagram.data)\n if control != self.ATP_FUNC_TREQ or bitmap != 1 or zero != 0: continue\n if func == self.ZIP_ATP_FUNC_GETMYZONE:\n self._get_my_zone(router, datagram, rx_port)\n elif func == self.ZIP_ATP_FUNC_GETZONELIST:\n self._get_zone_list(router, datagram, rx_port, 
local=False)\n elif func == self.ZIP_ATP_FUNC_GETLOCALZONES:\n self._get_zone_list(router, datagram, rx_port, local=True)\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "ZipSendingService", "path": "tashrouter/service/zip/sending.py", "snippet": "class ZipSendingService(Service, ZipService):\n '''A Service which sends ZIP queries to fill out its router's Zone Information Table.'''\n \n DEFAULT_TIMEOUT = 10 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.stop_requested_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.stop_requested_event.set()\n self.stopped_event.wait()\n \n def _run(self, router):\n \n self.started_event.set()\n \n while True:\n \n if self.stop_requested_event.wait(timeout=self.timeout): break\n \n queries = {} # (port, network, node) -> network_mins\n for entry in router.routing_table:\n try:\n if next(iter(router.zone_information_table.zones_in_network_range(entry.network_min, entry.network_max)), None): continue\n except ValueError as e:\n logging.warning('%s apparent disjoin between routing table and zone information table: %s', router, e.args[0])\n continue\n if entry.distance == 0:\n key = (entry.port, 0x0000, 0xFF)\n else:\n key = (entry.port, entry.next_network, entry.next_node)\n if key not in queries: queries[key] = deque()\n queries[key].append(entry.network_min)\n \n for port_network_node, network_mins in queries.items():\n port, network, node = port_network_node\n if 0 in (port.node, port.network): continue\n datagram_data = deque()\n for network_min in chain(network_mins, (None,)):\n if network_min is None or len(datagram_data) * 2 + 4 > Datagram.MAX_DATA_LENGTH:\n datagram_data.appendleft(struct.pack('>BB', self.ZIP_FUNC_QUERY, len(datagram_data)))\n port.send(network, node, Datagram(hop_count=0,\n destination_network=network,\n source_network=port.network,\n destination_node=node,\n source_node=port.node,\n destination_socket=self.ZIP_SAS,\n source_socket=self.ZIP_SAS,\n ddp_type=self.ZIP_DDP_TYPE,\n data=b''.join(datagram_data)))\n if network_min is not None: datagram_data = deque((struct.pack('>H', network_min),))\n else:\n datagram_data.append(struct.pack('>H', network_min))\n \n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n pass" } ]
import_statement:
import logging
from .routing_table import RoutingTable
from .zone_information_table import ZoneInformationTable
from ..datagram import Datagram
from ..service.echo import EchoService
from ..service.name_information import NameInformationService
from ..service.routing_table_aging import RoutingTableAgingService
from ..service.rtmp.responding import RtmpRespondingService
from ..service.rtmp.sending import RtmpSendingService
from ..service.zip.responding import ZipRespondingService
from ..service.zip.sending import ZipSendingService
token_num: 11,960
cropped_code:
'''The heart of this whole affair.'''

class Router:
  '''A router, a device which sends Datagrams to Ports and runs Services.'''
  
  def __init__(self, short_str, ports):
    self._short_str = short_str
    self.ports = ports
    self._services = (
all_code:
'''The heart of this whole affair.'''

class Router:
  '''A router, a device which sends Datagrams to Ports and runs Services.'''
  
  def __init__(self, short_str, ports):
    self._short_str = short_str
    self.ports = ports
    self._services = (
next_line: (EchoService.ECHO_SAS, EchoService()),
gold_snippet_index: 3
created_at: 2023-12-02 15:17:07+00:00
level: 16k

repo_name: jags111/ComfyUI_Jags_Audiotools
file_path: libs/dance_diffusion/dd/inference.py
[ { "identifier": "SchedulerType", "path": "libs/diffusion_library/scheduler.py", "snippet": "class SchedulerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_SPLICED_DDPM_COSINE = 'V_SPLICED_DDPM_COSINE'\n V_LOG = 'V_LOG'\n V_CRASH = 'V_CRASH'\n \n K_KARRAS = 'K_KARRAS'\n K_EXPONENTIAL = 'K_EXPONENTIAL'\n K_POLYEXPONENTIAL = 'K_POLYEXPONENTIAL'\n K_VP = 'K_VP'\n \n @classmethod\n def is_v_scheduler(cls, value):\n return value[0] == 'V'\n \n def get_step_list(self, n: int, device: str, **schedule_args):\n #if SchedulerType.is_v_scheduler(self):\n # n -= 1\n\n if self == SchedulerType.V_DDPM:\n return torch.nn.functional.pad(vscheduling.get_ddpm_schedule(torch.linspace(1, 0, n)), [0,1], value=0.0).to(device)\n elif self == SchedulerType.V_SPLICED_DDPM_COSINE:\n return vscheduling.get_spliced_ddpm_cosine_schedule(torch.linspace(1, 0, n + 1)).to(device)\n elif self == SchedulerType.V_LOG:\n return torch.nn.functional.pad(\n vscheduling.get_log_schedule(\n torch.linspace(1, 0, n),\n schedule_args.get('min_log_snr', -10.0),\n schedule_args.get('max_log_snr', 10.0)\n ),\n [0,1],\n value=0.0\n ).to(device)\n elif self == SchedulerType.V_CRASH:\n sigma = torch.sin(torch.linspace(1, 0, n + 1) * math.pi / 2) ** 2\n alpha = (1 - sigma ** 2) ** 0.5\n return vscheduling.alpha_sigma_to_t(alpha, sigma).to(device)\n elif self == SchedulerType.K_KARRAS:\n return kscheduling.get_sigmas_karras(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 7.0),\n device = device\n )\n elif self == SchedulerType.K_EXPONENTIAL:\n return kscheduling.get_sigmas_exponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n device = device\n )\n elif self == SchedulerType.K_POLYEXPONENTIAL:\n return kscheduling.get_sigmas_polyexponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 1.0),\n device = device\n )\n elif self == SchedulerType.K_VP:\n return kscheduling.get_sigmas_vp(\n n,\n schedule_args.get('beta_d', 1.205),\n schedule_args.get('beta_min', 0.09),\n schedule_args.get('eps_s', 0.001),\n device = device\n )\n else:\n raise Exception(f\"No get_step_list implementation for scheduler_type '{self}'\")" }, { "identifier": "SamplerType", "path": "libs/diffusion_library/sampler.py", "snippet": "class SamplerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_DDIM = 'V_DDIM'\n V_PRK = 'V_PRK'\n V_PIE = 'V_PIE'\n V_PLMS = 'V_PLMS'\n V_PLMS2 = 'V_PLMS2'\n V_IPLMS = 'V_IPLMS'\n \n K_EULER = 'K_EULER'\n K_EULERA = 'K_EULERA'\n K_HEUN = 'K_HEUN'\n K_DPM2 = 'K_DPM2'\n K_DPM2A = 'K_DPM2A'\n K_LMS = 'K_LMS'\n K_DPMF = 'K_DPMF'\n K_DPMA = 'K_DPMA'\n K_DPMPP2SA = 'K_DPMPP2SA'\n K_DPMPP2M = 'K_DPMPP2M'\n K_DPMPPSDE = 'K_DPMPPSDE'\n\n @classmethod\n def is_v_sampler(cls, value):\n return value[0] == 'V'\n\n def sample(self, model_fn, x_t, steps, callback, **sampler_args) -> torch.Tensor:\n if self == SamplerType.V_DDPM:\n if sampler_args.get('is_reverse'):\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_DDIM:\n if sampler_args.get('is_reverse'): # HACK: Technically incorrect since DDIM implies eta > 0.0\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return 
vsampling.sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('eta', 0.1),\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_PRK:\n return vsampling.prk_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PIE:\n return vsampling.pie_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS:\n return vsampling.plms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS2:\n return vsampling.plms2_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_IPLMS:\n return vsampling.iplms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.K_EULER:\n return ksampling.sample_euler(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_EULERA:\n return ksampling.sample_euler_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_HEUN:\n return ksampling.sample_heun(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2:\n return ksampling.sample_dpm_2(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2A:\n return ksampling.sample_dpm_2_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_LMS:\n return ksampling.sample_lms(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 4)\n )\n elif self == SamplerType.K_DPMF:# sample_dpm_fast\n return ksampling.sample_dpm_fast(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('n', 3),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMA:\n return ksampling.sample_dpm_adaptive(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 3),\n sampler_args.get('rtol', 0.05),\n sampler_args.get('atol', 0.0078),\n 
sampler_args.get('h_init', 0.05),\n sampler_args.get('pcoeff', 0.0),\n sampler_args.get('icoeff', 1.0),\n sampler_args.get('dcoeff', 0.0),\n sampler_args.get('accept_safety', 0.81),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('return_info', False)\n )\n elif self == SamplerType.K_DPMPP2SA:\n return ksampling.sample_dpmpp_2s_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMPP2M:\n return ksampling.sample_dpmpp_2m(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False)\n )\n elif self == SamplerType.K_DPMPPSDE:\n return ksampling.sample_dpmpp_sde(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('r', 1/2)\n )\n else:\n raise Exception(f\"No sample implementation for sampler_type '{self}'\")" }, { "identifier": "ModelWrapperBase", "path": "libs/dance_diffusion/base/model.py", "snippet": "class ModelWrapperBase():\n \n def __init__(self):\n #self.uuid: str = None\n #self.name: str = None\n self.path: str = None\n \n self.device_accelerator: torch.device = None\n \n self.chunk_size: int = None\n self.sample_rate: int = None\n \n \n def load(\n self,\n path: str,\n device_accelerator: torch.device,\n optimize_memory_use:bool=False,\n chunk_size: int=131072,\n sample_rate: int=48000\n ):\n raise NotImplementedError" }, { "identifier": "InferenceBase", "path": "libs/dance_diffusion/base/inference.py", "snippet": "class InferenceBase():\n def __init__(\n self,\n device_accelerator: torch.device,\n device_offload: torch.device,\n optimize_memory_use: bool,\n use_autocast: bool,\n model: ModelWrapperBase\n ):\n self.device_accelerator = device_accelerator\n self.device_offload = device_offload if(optimize_memory_use==True) else None\n self.optimize_memory_use = optimize_memory_use\n self.use_autocast = use_autocast\n self.model = model\n self.generator = torch.Generator(device_accelerator)# if (device_accelerator.type != 'mps') else torch.device('cpu'))\n self.rng_state = None\n \n def set_device_accelerator(\n self,\n device: torch.device = None\n ):\n self.device_accelerator = device\n \n def get_device_accelerator(\n self\n ) -> torch.device:\n return self.device_accelerator\n \n def set_model(\n self,\n model: ModelWrapperBase = None\n ):\n self.model = model\n \n def get_model(\n self\n ) -> ModelWrapperBase:\n return self.model\n\n def expand(\n self,\n tensor: torch.Tensor,\n expansion_map: list[int]\n ) -> torch.Tensor:\n out = torch.empty([0], device=self.device_accelerator)\n \n for i in range(tensor.shape[0]):\n out = torch.cat([out, tensor[i,:,:].expand(expansion_map[i], -1, -1)], 0)\n \n return out\n \n \n # def cc_randn(self, shape:tuple, seed:int, device:torch.device, dtype = None, rng_state_in:torch.Tensor = None):\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([shape[0], shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty(shape,device=device, dtype=dtype, device=device)\n \n # for sample in range(shape[0]):\n # for channel in range(shape[1]):\n # 
self.generator.manual_seed(seed + sample * shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([shape[2]], generator=self.generator, dtype=dtype, device=device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n # def cc_randn_like(self, input:torch.Tensor, seed:int, rng_state_in:torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([input.shape[0], input.shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty_like(input)\n \n # for sample in range(input.shape[0]):\n # for channel in range(input.shape[1]):\n # self.generator.manual_seed(seed + sample * input.shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([input.shape[2]], generator=self.generator, dtype=input.dtype, device=input.device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n \n def autocast_context(self):\n if self.device_accelerator.type == 'cuda':\n return torch.cuda.amp.autocast()\n elif self.device_accelerator.type == 'cpu':\n return torch.cpu.amp.autocast()\n elif self.device_accelerator.type == 'mps':\n return nullcontext()\n else:\n return torch.autocast(self.device_accelerator.type, dtype=torch.float32)\n\n @contextlib.contextmanager\n def offload_context(self, model):\n \"\"\"\n Used by inference implementations, this context manager moves the\n passed model to the inference's `device_accelerator` device on enter,\n and then returns it to the `device_offload` device on exit.\n\n It also wraps the `inference.autocast_context()` context.\n \"\"\"\n\n autocast = self.autocast_context() if self.use_autocast else nullcontext()\n \n with autocast:\n if self.optimize_memory_use:\n model.to(self.device_accelerator)\n\n yield None\n\n if self.optimize_memory_use:\n model.to(self.device_offload)" }, { "identifier": "tensor_slerp_2D", "path": "libs/util/util.py", "snippet": "def tensor_slerp_2D(a: torch.Tensor, b: torch.Tensor, t: float):\n slerped = torch.empty_like(a)\n \n for channel in range(a.size(0)):\n slerped[channel] = tensor_slerp(a[channel], b[channel], t)\n \n return slerped" }, { "identifier": "PosteriorSampling", "path": "libs/util/util.py", "snippet": "class PosteriorSampling(torch.nn.Module):\n def __init__(self, model, x_T, measurement, mask, scale):\n super().__init__()\n self.model = model\n self.x_prev = x_T\n self.measurement = measurement\n self.mask = mask\n self.scale = scale\n \n @torch.enable_grad()\n def forward(self, input, sigma, **kwargs):\n x_t = input.detach().requires_grad_()\n out = self.model(x_t, sigma, **kwargs)\n difference = (self.measurement - out) * self.mask\n norm = torch.linalg.norm(difference)\n norm_grad = torch.autograd.grad(outputs=norm, inputs=x_t)[0].detach()\n \n return out.detach() - self.scale * norm_grad\n \n # x_t = input.detach().requires_grad_()\n # x_0_hat = self.model(input, sigma, **kwargs).detach().requires_grad_()\n \n # difference = (self.measurement - x_0_hat) * self.mask\n # norm = torch.linalg.norm(difference)\n # norm_grad = torch.autograd.grad(outputs=norm, inputs=self.x_prev)[0].detach()\n \n # self.x_prev = 
x_t.detach().requires_grad_()\n \n # return x_t.detach() - norm_grad * self.scale" }, { "identifier": "SchedulerType", "path": "libs/diffusion_library/scheduler.py", "snippet": "class SchedulerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_SPLICED_DDPM_COSINE = 'V_SPLICED_DDPM_COSINE'\n V_LOG = 'V_LOG'\n V_CRASH = 'V_CRASH'\n \n K_KARRAS = 'K_KARRAS'\n K_EXPONENTIAL = 'K_EXPONENTIAL'\n K_POLYEXPONENTIAL = 'K_POLYEXPONENTIAL'\n K_VP = 'K_VP'\n \n @classmethod\n def is_v_scheduler(cls, value):\n return value[0] == 'V'\n \n def get_step_list(self, n: int, device: str, **schedule_args):\n #if SchedulerType.is_v_scheduler(self):\n # n -= 1\n\n if self == SchedulerType.V_DDPM:\n return torch.nn.functional.pad(vscheduling.get_ddpm_schedule(torch.linspace(1, 0, n)), [0,1], value=0.0).to(device)\n elif self == SchedulerType.V_SPLICED_DDPM_COSINE:\n return vscheduling.get_spliced_ddpm_cosine_schedule(torch.linspace(1, 0, n + 1)).to(device)\n elif self == SchedulerType.V_LOG:\n return torch.nn.functional.pad(\n vscheduling.get_log_schedule(\n torch.linspace(1, 0, n),\n schedule_args.get('min_log_snr', -10.0),\n schedule_args.get('max_log_snr', 10.0)\n ),\n [0,1],\n value=0.0\n ).to(device)\n elif self == SchedulerType.V_CRASH:\n sigma = torch.sin(torch.linspace(1, 0, n + 1) * math.pi / 2) ** 2\n alpha = (1 - sigma ** 2) ** 0.5\n return vscheduling.alpha_sigma_to_t(alpha, sigma).to(device)\n elif self == SchedulerType.K_KARRAS:\n return kscheduling.get_sigmas_karras(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 7.0),\n device = device\n )\n elif self == SchedulerType.K_EXPONENTIAL:\n return kscheduling.get_sigmas_exponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n device = device\n )\n elif self == SchedulerType.K_POLYEXPONENTIAL:\n return kscheduling.get_sigmas_polyexponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 1.0),\n device = device\n )\n elif self == SchedulerType.K_VP:\n return kscheduling.get_sigmas_vp(\n n,\n schedule_args.get('beta_d', 1.205),\n schedule_args.get('beta_min', 0.09),\n schedule_args.get('eps_s', 0.001),\n device = device\n )\n else:\n raise Exception(f\"No get_step_list implementation for scheduler_type '{self}'\")" }, { "identifier": "SamplerType", "path": "libs/diffusion_library/sampler.py", "snippet": "class SamplerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_DDIM = 'V_DDIM'\n V_PRK = 'V_PRK'\n V_PIE = 'V_PIE'\n V_PLMS = 'V_PLMS'\n V_PLMS2 = 'V_PLMS2'\n V_IPLMS = 'V_IPLMS'\n \n K_EULER = 'K_EULER'\n K_EULERA = 'K_EULERA'\n K_HEUN = 'K_HEUN'\n K_DPM2 = 'K_DPM2'\n K_DPM2A = 'K_DPM2A'\n K_LMS = 'K_LMS'\n K_DPMF = 'K_DPMF'\n K_DPMA = 'K_DPMA'\n K_DPMPP2SA = 'K_DPMPP2SA'\n K_DPMPP2M = 'K_DPMPP2M'\n K_DPMPPSDE = 'K_DPMPPSDE'\n\n @classmethod\n def is_v_sampler(cls, value):\n return value[0] == 'V'\n\n def sample(self, model_fn, x_t, steps, callback, **sampler_args) -> torch.Tensor:\n if self == SamplerType.V_DDPM:\n if sampler_args.get('is_reverse'):\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_DDIM:\n if sampler_args.get('is_reverse'): # HACK: Technically incorrect since DDIM implies eta > 0.0\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n 
sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('eta', 0.1),\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_PRK:\n return vsampling.prk_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PIE:\n return vsampling.pie_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS:\n return vsampling.plms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS2:\n return vsampling.plms2_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_IPLMS:\n return vsampling.iplms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.K_EULER:\n return ksampling.sample_euler(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_EULERA:\n return ksampling.sample_euler_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_HEUN:\n return ksampling.sample_heun(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2:\n return ksampling.sample_dpm_2(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2A:\n return ksampling.sample_dpm_2_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_LMS:\n return ksampling.sample_lms(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 4)\n )\n elif self == SamplerType.K_DPMF:# sample_dpm_fast\n return ksampling.sample_dpm_fast(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('n', 3),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMA:\n return ksampling.sample_dpm_adaptive(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 3),\n 
sampler_args.get('rtol', 0.05),\n sampler_args.get('atol', 0.0078),\n sampler_args.get('h_init', 0.05),\n sampler_args.get('pcoeff', 0.0),\n sampler_args.get('icoeff', 1.0),\n sampler_args.get('dcoeff', 0.0),\n sampler_args.get('accept_safety', 0.81),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('return_info', False)\n )\n elif self == SamplerType.K_DPMPP2SA:\n return ksampling.sample_dpmpp_2s_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMPP2M:\n return ksampling.sample_dpmpp_2m(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False)\n )\n elif self == SamplerType.K_DPMPPSDE:\n return ksampling.sample_dpmpp_sde(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('r', 1/2)\n )\n else:\n raise Exception(f\"No sample implementation for sampler_type '{self}'\")" }, { "identifier": "ModelWrapperBase", "path": "libs/dance_diffusion/base/model.py", "snippet": "class ModelWrapperBase():\n \n def __init__(self):\n #self.uuid: str = None\n #self.name: str = None\n self.path: str = None\n \n self.device_accelerator: torch.device = None\n \n self.chunk_size: int = None\n self.sample_rate: int = None\n \n \n def load(\n self,\n path: str,\n device_accelerator: torch.device,\n optimize_memory_use:bool=False,\n chunk_size: int=131072,\n sample_rate: int=48000\n ):\n raise NotImplementedError" }, { "identifier": "InferenceBase", "path": "libs/dance_diffusion/base/inference.py", "snippet": "class InferenceBase():\n def __init__(\n self,\n device_accelerator: torch.device,\n device_offload: torch.device,\n optimize_memory_use: bool,\n use_autocast: bool,\n model: ModelWrapperBase\n ):\n self.device_accelerator = device_accelerator\n self.device_offload = device_offload if(optimize_memory_use==True) else None\n self.optimize_memory_use = optimize_memory_use\n self.use_autocast = use_autocast\n self.model = model\n self.generator = torch.Generator(device_accelerator)# if (device_accelerator.type != 'mps') else torch.device('cpu'))\n self.rng_state = None\n \n def set_device_accelerator(\n self,\n device: torch.device = None\n ):\n self.device_accelerator = device\n \n def get_device_accelerator(\n self\n ) -> torch.device:\n return self.device_accelerator\n \n def set_model(\n self,\n model: ModelWrapperBase = None\n ):\n self.model = model\n \n def get_model(\n self\n ) -> ModelWrapperBase:\n return self.model\n\n def expand(\n self,\n tensor: torch.Tensor,\n expansion_map: list[int]\n ) -> torch.Tensor:\n out = torch.empty([0], device=self.device_accelerator)\n \n for i in range(tensor.shape[0]):\n out = torch.cat([out, tensor[i,:,:].expand(expansion_map[i], -1, -1)], 0)\n \n return out\n \n \n # def cc_randn(self, shape:tuple, seed:int, device:torch.device, dtype = None, rng_state_in:torch.Tensor = None):\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([shape[0], shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty(shape,device=device, dtype=dtype, device=device)\n \n # for 
sample in range(shape[0]):\n # for channel in range(shape[1]):\n # self.generator.manual_seed(seed + sample * shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([shape[2]], generator=self.generator, dtype=dtype, device=device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n # def cc_randn_like(self, input:torch.Tensor, seed:int, rng_state_in:torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([input.shape[0], input.shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty_like(input)\n \n # for sample in range(input.shape[0]):\n # for channel in range(input.shape[1]):\n # self.generator.manual_seed(seed + sample * input.shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([input.shape[2]], generator=self.generator, dtype=input.dtype, device=input.device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n \n def autocast_context(self):\n if self.device_accelerator.type == 'cuda':\n return torch.cuda.amp.autocast()\n elif self.device_accelerator.type == 'cpu':\n return torch.cpu.amp.autocast()\n elif self.device_accelerator.type == 'mps':\n return nullcontext()\n else:\n return torch.autocast(self.device_accelerator.type, dtype=torch.float32)\n\n @contextlib.contextmanager\n def offload_context(self, model):\n \"\"\"\n Used by inference implementations, this context manager moves the\n passed model to the inference's `device_accelerator` device on enter,\n and then returns it to the `device_offload` device on exit.\n\n It also wraps the `inference.autocast_context()` context.\n \"\"\"\n\n autocast = self.autocast_context() if self.use_autocast else nullcontext()\n \n with autocast:\n if self.optimize_memory_use:\n model.to(self.device_accelerator)\n\n yield None\n\n if self.optimize_memory_use:\n model.to(self.device_offload)" }, { "identifier": "tensor_slerp_2D", "path": "libs/util/util.py", "snippet": "def tensor_slerp_2D(a: torch.Tensor, b: torch.Tensor, t: float):\n slerped = torch.empty_like(a)\n \n for channel in range(a.size(0)):\n slerped[channel] = tensor_slerp(a[channel], b[channel], t)\n \n return slerped" }, { "identifier": "PosteriorSampling", "path": "libs/util/util.py", "snippet": "class PosteriorSampling(torch.nn.Module):\n def __init__(self, model, x_T, measurement, mask, scale):\n super().__init__()\n self.model = model\n self.x_prev = x_T\n self.measurement = measurement\n self.mask = mask\n self.scale = scale\n \n @torch.enable_grad()\n def forward(self, input, sigma, **kwargs):\n x_t = input.detach().requires_grad_()\n out = self.model(x_t, sigma, **kwargs)\n difference = (self.measurement - out) * self.mask\n norm = torch.linalg.norm(difference)\n norm_grad = torch.autograd.grad(outputs=norm, inputs=x_t)[0].detach()\n \n return out.detach() - self.scale * norm_grad\n \n # x_t = input.detach().requires_grad_()\n # x_0_hat = self.model(input, sigma, **kwargs).detach().requires_grad_()\n \n # difference = (self.measurement - x_0_hat) * self.mask\n # norm = torch.linalg.norm(difference)\n # norm_grad = torch.autograd.grad(outputs=norm, 
inputs=self.x_prev)[0].detach()\n \n # self.x_prev = x_t.detach().requires_grad_()\n \n # return x_t.detach() - norm_grad * self.scale" } ]
import_statement:
import torch
from tqdm.auto import trange
from diffusion.utils import t_to_alpha_sigma
from k_diffusion.external import VDenoiser
from typing import Tuple, Callable
from libs.diffusion_library.scheduler import SchedulerType
from libs.diffusion_library.sampler import SamplerType
from libs.dance_diffusion.base.model import ModelWrapperBase
from libs.dance_diffusion.base.inference import InferenceBase
from libs.util.util import tensor_slerp_2D, PosteriorSampling
from typing import Tuple, Callable
from libs.diffusion_library.scheduler import SchedulerType
from libs.diffusion_library.sampler import SamplerType
from libs.dance_diffusion.base.model import ModelWrapperBase
from libs.dance_diffusion.base.inference import InferenceBase
from libs.util.util import tensor_slerp_2D, PosteriorSampling
token_num: 10,836
x_T, step_list, callback, **sampler_args ).float() def generate_variation( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) audio_source = self.expand(audio_source, expansion_map) if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] alpha_T, sigma_T = t_to_alpha_sigma(step_list[0]) x_T = alpha_T * audio_source + sigma_T * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) x_T = audio_source + step_list[0] * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = VDenoiser(self.model.model) with self.offload_context(self.model.model): return sampler.sample( model, x_T, step_list, callback, **sampler_args ).float() def generate_interpolation( self, callback: Callable = None, batch_size: int = None, # seed: int = None, interpolation_positions: list[float] = None, audio_source: torch.Tensor = None, audio_target: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[:-1] #HACK avoid division by 0 in reverse sampling model = VDenoiser(self.model.model) if self.optimize_memory_use and batch_size < 2: x_0_source = audio_source x_0_target = audio_target with self.offload_context(self.model.model): x_T_source = sampler.sample( model, x_0_source, step_list.flip(0), callback, **sampler_args ) with self.offload_context(self.model.model): x_T_target = sampler.sample( model, x_0_target, step_list.flip(0), callback, **sampler_args ) x_T = torch.cat([x_T_source, x_T_target], dim=0) else: x_0 = torch.cat([audio_source, audio_target], dim=0) with self.offload_context(self.model.model): x_T = sampler.sample( model, x_0, step_list.flip(0), callback, **sampler_args ) if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack step_list[-1] = 0.0 else: step_list = torch.cat([step_list, step_list.new_zeros([1])]) x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator) for pos in range(len(interpolation_positions)):
class DDInference(InferenceBase): def __init__( self, device_accelerator: torch.device = None, device_offload: torch.device = None, optimize_memory_use: bool = False, use_autocast: bool = True, model: ModelWrapperBase = None ): super().__init__(device_accelerator, device_offload, optimize_memory_use, use_autocast, model) def generate( self, callback: Callable = None, batch_size: int = None, seed: int = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args: dict = None, sampler: SamplerType = None, sampler_args: dict = None, **kwargs ): self.generator.manual_seed(seed) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)#step_list = step_list[:-1] if sampler in [SamplerType.V_PRK, SamplerType.V_PLMS, SamplerType.V_PIE, SamplerType.V_PLMS2, SamplerType.V_IPLMS] else step_list if SamplerType.is_v_sampler(sampler): x_T = torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator) model = self.model.model else: x_T = step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator) model = VDenoiser(self.model.model) with self.offload_context(self.model.model): return sampler.sample( model, x_T, step_list, callback, **sampler_args ).float() def generate_variation( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) audio_source = self.expand(audio_source, expansion_map) if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] alpha_T, sigma_T = t_to_alpha_sigma(step_list[0]) x_T = alpha_T * audio_source + sigma_T * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) x_T = audio_source + step_list[0] * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = VDenoiser(self.model.model) with self.offload_context(self.model.model): return sampler.sample( model, x_T, step_list, callback, **sampler_args ).float() def generate_interpolation( self, callback: Callable = None, batch_size: int = None, # seed: int = None, interpolation_positions: list[float] = None, audio_source: torch.Tensor = None, audio_target: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[:-1] #HACK avoid division by 0 in reverse 
sampling model = VDenoiser(self.model.model) if self.optimize_memory_use and batch_size < 2: x_0_source = audio_source x_0_target = audio_target with self.offload_context(self.model.model): x_T_source = sampler.sample( model, x_0_source, step_list.flip(0), callback, **sampler_args ) with self.offload_context(self.model.model): x_T_target = sampler.sample( model, x_0_target, step_list.flip(0), callback, **sampler_args ) x_T = torch.cat([x_T_source, x_T_target], dim=0) else: x_0 = torch.cat([audio_source, audio_target], dim=0) with self.offload_context(self.model.model): x_T = sampler.sample( model, x_0, step_list.flip(0), callback, **sampler_args ) if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack step_list[-1] = 0.0 else: step_list = torch.cat([step_list, step_list.new_zeros([1])]) x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator) for pos in range(len(interpolation_positions)):
next_line: x_Int[pos] = tensor_slerp_2D(x_T[0], x_T[1], interpolation_positions[pos])
gold_snippet_index: 10
created_at: 2023-11-28 09:09:59+00:00
level: 16k

repo_name: Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
file_path: TestCaseDistributionSystems/uc_mmgs_tess_pv_stochastic.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Optimal Load Flow with Steady State Security\"},\n IEEE Transactions on Power Apparatus and Systems, Vol. PAS 93, No. 3,\n 1974, pp. 745-751.\n\n ... with branch parameters rounded to nearest 0.01, shunt values divided\n by 100 and shunt on bus 10 moved to bus 5, load at bus 5 zeroed out.\n Generator locations, costs and limits and bus areas were taken from ...\n\n Ferrero, R.W., Shahidehpour, S.M., Ramesh, V.C., I{\"Transaction analysis\n in deregulated power systems using game theory\"}, IEEE Transactions on\n Power Systems, Vol. 12, No. 3, Aug 1997, pp. 1340-1347.\n\n Generator Q limits were derived from Alsac & Stott, using their Pmax\n capacities. V limits and line |S| limits taken from Alsac & Stott.\n\n @return: Power flow data for 30 bus, 6 generator case.\n @see: U{http://www.pserc.cornell.edu/matpower/}\n \"\"\"\n ppc = {\"version\": '2'}\n\n ##----- Power Flow Data -----##\n ## system MVA base\n ppc[\"baseMVA\"] = 100.0\n\n ## bus data\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\n ppc[\"bus\"] = array([\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [2, 1, 0.1, 0.06, 0, 0, 1, 1, 0, 12.66, 1, 1.1, 0.95],\n [3, 1, 0.09, 0.04, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [4, 1, 0.12, 0.08, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [5, 1, 0.06, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [6, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [7, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [8, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [9, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [10, 1, 0.06, 0.02, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [11, 1, 0.045, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [12, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [13, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [14, 1, 0.12, 0.08, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [15, 1, 0.06, 0.01, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [16, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [17, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [18, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [19, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [20, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [21, 1, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [22, 2, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [23, 2, 0.09, 0.05, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [24, 1, 0.42, 0.20, 0, 0.04, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [25, 1, 0.42, 0.2, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [26, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [27, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [28, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [29, 1, 0.12, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [30, 1, 0.2, 0.6, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [31, 1, 0.15, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [32, 1, 0.21, 0.1, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [33, 1, 0.06, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n ])\n\n ## generator data\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf, start-up time, shut-down time and initial condition!\n ppc[\"gen\"] = array([\n [1, 
23.54, 0, 150, -20, 1, 100, 1, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1],\n ])\n\n ## branch data\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\n ppc[\"branch\"] = array([\n [1, 2, 0.057525912, 0.029324489, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [2, 3, 0.307595167, 0.15666764, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [3, 4, 0.228356656, 0.116299674, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [4, 5, 0.237777928, 0.121103899, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [5, 6, 0.510994811, 0.441115179, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [6, 7, 0.116798814, 0.386084969, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [7, 8, 0.44386045, 0.146684835, 0, 90, 90, 90, 0, 0, 1, -360, 360],\n [8, 9, 0.642643047, 0.461704714, 0, 70, 70, 70, 0, 0, 1, -360, 360],\n [9, 10, 0.651378001, 0.461704714, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [10, 11, 0.122663712, 0.040555144, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [11, 12, 0.233597628, 0.077241951, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [12, 13, 0.915922324, 0.720633708, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [13, 14, 0.337917936, 0.444796338, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [14, 15, 0.368739846, 0.328184702, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [15, 16, 0.465635443, 0.340039282, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [16, 17, 0.804239697, 1.073775422, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [17, 18, 0.456713311, 0.358133116, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [2, 19, 0.102323747, 0.097644308, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [19, 20, 0.938508419, 0.845668336, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [20, 21, 0.255497406, 0.298485858, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [21, 22, 0.442300637, 0.584805173, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [3, 23, 0.28151509, 0.192356167, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [23, 24, 0.560284909, 0.442425422, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [24, 25, 0.559037059, 0.43743402, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [6, 26, 0.126656834, 0.064513875, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [26, 27, 0.177319567, 0.090281989, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [27, 28, 0.660736881, 0.582559042, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [28, 29, 0.501760717, 0.437122057, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [29, 30, 0.316642084, 0.161284687, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [30, 31, 0.607952801, 0.600840053, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [31, 32, 0.193728802, 0.225798562, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [32, 33, 0.212758523, 0.330805188, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [7, 20, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [8, 14, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [11, 21, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [17, 32, 0.3120, 0.3120, 0, 65, 65, 65, 0, 0, 0, -360, 360],\n [24, 28, 0.3120, 0.3120, 0, 16, 16, 16, 0, 0, 0, -360, 360]\n ])\n\n ##----- OPF Data -----##\n ## area data\n # area refbus\n ppc[\"areas\"] = array([\n [1, 8],\n [2, 23],\n [3, 26],\n ])\n\n ## generator cost data\n # 1 startup shutdown n x1 y1 ... xn yn\n # 2 startup shutdown n c(n-1) ... 
c0\n ppc[\"gencost\"] = array([\n [0, 0, 0, 3, 0.0, 20, 0]\n ])\n\n return ppc" }, { "identifier": "micro_grid", "path": "TestCasesMicrogrids/test_cases/cases_unit_commitment.py", "snippet": "AC_PD = array([323.0284, 308.2374, 318.1886, 307.9809, 331.2170, 368.6539, 702.0040, 577.7045, 1180.4547, 1227.6240,\n 1282.9344, 1311.9738, 1268.9502, 1321.7436, 1323.9218, 1327.1464, 1386.9117, 1321.6387, 1132.0476,\n 1109.2701, 882.5698, 832.4520, 349.3568, 299.9920])\nDC_PD = array([287.7698, 287.7698, 287.7698, 287.7698, 299.9920, 349.3582, 774.4047, 664.0625, 1132.6996, 1107.7366,\n 1069.6837, 1068.9819, 1027.3295, 1096.3820, 1109.4778, 1110.7039, 1160.1270, 1078.7839, 852.2514,\n 791.5814, 575.4085, 551.1441, 349.3568, 299.992])\nDG = {\"PMIN\": 0,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST_A\": 0.01,\n \"COST_B\": 0.5}\nUG = {\"PMIN\": -5,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST\": Price_UG, } # The cost should be a profile\nESS = {\"PDC_MAX\": 5,\n \"PCH_MAX\": 5,\n \"EFF_DC\": 0.95,\n \"EFF_CH\": 0.95,\n \"E0\": 10,\n \"EMIN\": 5,\n \"EMAX\": 20, }\nBIC = {\"PMAX\": 5,\n \"QMAX\": 5,\n \"SMAX\": 5,\n \"EFF_AC2DC\": 0.9,\n \"EFF_DC2AC\": 0.9, }\nMG = {\"PMAX\": 5,\n \"PMIN\": -5,\n \"QMAX\": 5,\n \"QMIN\": -5\n }\nPD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5,\n \"DC\": DC_PD / max(DC_PD),\n \"DC_MAX\": 5}\nQD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5, }\nPV = {\"PMAX\": 0,\n \"COST\": 0}" }, { "identifier": "PBIC_AC2DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_AC2DC = 4" }, { "identifier": "PG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PG = 0" }, { "identifier": "PESS_DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_DC = 8" }, { "identifier": "PBIC_DC2AC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_DC2AC = 5" }, { "identifier": "PUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PUG = 2" }, { "identifier": "PESS_CH", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_CH = 7" }, { "identifier": "PMESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PMESS = 11 # Reactive power unit commitment of" }, { "identifier": "EESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "EESS = 9" }, { "identifier": "NX_MG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "NX_MG = 12" }, { "identifier": "QBIC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QBIC = 6" }, { "identifier": "QUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QUG = 3" }, { "identifier": "QG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QG = 1" }, { "identifier": "PPV", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PPV = 10" }, { "identifier": "DataBaseManagement", "path": "TestCaseDistributionSystems/database_management_pv.py", "snippet": "class DataBaseManagement():\n\n def __init__(self, host=\"localhost\", user=\"ems\", password=\"12345678\", db=\"mess_pv\"):\n \"\"\"\n Initialized the database connection string\n :param host: host ip\n :param user: user name\n :param password: password\n :param db: database name\n :return\n \"\"\"\n self.db = pymysql.connect(host=host, user=user, password=password, db=db)\n\n def create_table(self, table_name, nl=32, nb=33, ng=6, nmg=3):\n \"\"\"\n 
Creat table name\n :param table_name:\n :param nb:\n :param nb:\n :param ng:\n :return: no return value\n \"\"\"\n cursor = self.db.cursor()\n sql = \"DROP TABLE IF EXISTS \"\n cursor.execute(sql + table_name)\n if table_name == \"distribution_networks\":\n sql_start = \"\"\"CREATE TABLE distribution_networks (\"\"\"\n sql = 'SCENARIO INT,\\n TIME INT NOT NULL,\\n '\n for i in range(nl):\n sql += \"PIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"QIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"IIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nb):\n sql += \"V{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng - 1):\n sql += \"QG{0} DECIMAL(8,6),\\n \".format(i)\n sql += \"QG{0} DECIMAL(8,6)\\n \".format(ng - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"micro_grids\":\n sql_start = \"\"\"CREATE TABLE micro_grids (\"\"\"\n sql = 'SCENARIO INT,\\n MG INT,\\n TIME INT,\\n '\n sql += 'PG DECIMAL(8,4),\\n QG DECIMAL(8,4),\\n PUG DECIMAL(8,4),\\n QUG DECIMAL(8,4),\\n '\n sql += 'PBIC_AC2DC DECIMAL(8,4),\\n PBIC_DC2AC DECIMAL(8,4),\\n QBIC DECIMAL(8,4),\\n PESS_CH DECIMAL(7,4),\\n '\n sql += 'PESS_DC DECIMAL(8,4),\\n EESS DECIMAL(8,4),\\n PPV DECIMAL(8,4),\\n PMESS DECIMAL(8,4)'\n sql_end = \"\"\")\"\"\"\n elif table_name == \"mobile_energy_storage_systems\":\n sql_start = \"\"\"CREATE TABLE mobile_energy_storage_systems (\"\"\"\n sql = 'SCENARIO INT,\\n MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"EESS DECIMAL(8,4)\\n \"\n sql_end = \"\"\")\"\"\"\n elif table_name == \"first_stage_solutions\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE first_stage_solutions (\"\"\"\n sql = 'TIME INT,\\n'\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"IESS{0} INT,\\n \".format(i)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"ESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"IESS{0} INT,\\n \".format(nmg - 1)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"ESS{0} DECIMAL(8,4)\\n \".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"fisrt_stage_mess\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE fisrt_stage_mess (\"\"\"\n sql = 'MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"IDC_MG{0} INT,\\n \".format(i)\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"RMESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"MESS_F_STOP INT,\\n \"\n sql += \"MESS_T_STOP INT\\n \"\n sql_end = \"\"\")\"\"\"\n else:\n sql_start = \"\"\"CREATE TABLE scenarios (\"\"\"\n sql = 'SCENARIO INT,\\n WEIGHT DECIMAL(8,4),\\n TIME INT,\\n'\n for i in range(nb):\n sql += \"PD{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += 
\"PD_AC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PD_DC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PPV{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PPV{0} DECIMAL(8,4)\\n\".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n\n cursor.execute(sql_start + sql + sql_end)\n cursor.close()\n\n def insert_data_ds(self, table_name, nl=32, nb=33, ng=6, scenario=0, time=0, pij=0, qij=0, lij=0, vi=0, pg=0, qg=0):\n \"\"\"\n Insert data into table_name\n :param table_name:\n :param nl:\n :param nb:\n :param ng:\n :param pij:\n :param qij:\n :param lij:\n :param vi:\n :param pg:\n :param qg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,TIME,\"\n value = \"{0},{1},\".format(scenario, time)\n for i in range(nl):\n sql += \"PIJ{0},\".format(i)\n value += \"{0},\".format(pij[i])\n for i in range(nl):\n sql += \"QIJ{0},\".format(i)\n value += \"{0},\".format(qij[i])\n for i in range(nl):\n sql += \"IIJ{0},\".format(i)\n value += \"{0},\".format(lij[i])\n for i in range(nb):\n sql += \"V{0},\".format(i)\n value += \"{0},\".format(vi[i])\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n for i in range(ng - 1):\n sql += \"QG{0},\".format(i)\n value += \"{0},\".format(qg[i])\n sql += \"QG{0}\".format(ng - 1)\n value += \"{0}\".format(qg[ng - 1])\n\n sql += \") VALUES (\" + value + \")\"\n\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mg(self, table_name, scenario=0, time=0, mg=0, pg=0, qg=0, pug=0, qug=0, pbic_ac2dc=0, pbic_dc2ac=0,\n qbic=0, pess_ch=0, pess_dc=0, eess=0, pmess=0, ppv=0):\n \"\"\"\n insert microgrid data\n :param table_name:\n :param scenario:\n :param time:\n :param mg:\n :param pg:\n :param qg:\n :param pug:\n :param qug:\n :param pbic_ac2dc:\n :param pbic_dc2ac:\n :param qbic:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param pmess:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MG,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mg, time)\n sql += \"PG,QG,PUG,QUG,PBIC_AC2DC,PBIC_DC2AC,QBIC,PESS_CH,PESS_DC,EESS,PPV,PMESS\"\n value += \"{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11}\".format(pg, qg, pug, qug, pbic_ac2dc, pbic_dc2ac,\n qbic, pess_ch, pess_dc, eess, ppv, pmess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage_mess(self, table_name, time=0, mess=0, imess=[0, 0, 0], pmess_ch=[0, 0, 0],\n pmess_dc=[0, 0, 0], rmess=[0, 0, 0], mess_f_stop=0, mess_t_stop=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data in the first-stage\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"MESS,TIME,\"\n value = \"{0},{1},\".format(mess, time)\n for i in range(nmg):\n sql += \"IDC_MG{0},\".format(i)\n value += \"{0},\".format(imess[i])\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n for i in range(nmg):\n sql += \"RMESS{0},\".format(i)\n value += \"{0},\".format(rmess[i])\n sql += \"MESS_F_STOP,MESS_T_STOP\"\n value += \"{0},{1}\".format(mess_f_stop, mess_t_stop)\n sql += 
\") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mess(self, table_name, scenario=0, time=0, mess=0, pmess_ch=[0, 0, 0], pmess_dc=[0, 0, 0],\n emess=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MESS,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mess, time)\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n sql += \"EESS\"\n value += \"{0}\".format(emess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage(self, table_name, time=0, ng=2, nmg=2, pg=[0, 0], rg=[0, 0], pg_mg=[0, 0],\n rg_mg=[0, 0], iess=[0, 0], pess_dc=[0, 0], pess_ch=[0, 0], ress=[0, 0], ess=[0, 0]):\n \"\"\"\n insert scenario data\n :param table_name:\n :param scenario:\n :param weight:\n :param time:\n :param nb:\n :param nmg:\n :param pd:\n :param pd_ac:\n :param pd_dc:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"TIME,\"\n value = \"{0},\".format(time)\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n sql += \"RG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n value += \"{0},\".format(rg[i])\n if nmg > 1:\n for i in range(nmg - 1):\n sql += \"PG_MG{0},\".format(i)\n sql += \"RG_MG{0},\".format(i)\n sql += \"IESS{0},\".format(i)\n sql += \"PESS_DC{0},\".format(i)\n sql += \"PESS_CH{0},\".format(i)\n sql += \"RESS{0},\".format(i)\n sql += \"ESS{0},\".format(i)\n value += \"{0},\".format(pg_mg[i])\n value += \"{0},\".format(rg_mg[i])\n value += \"{0},\".format(iess[i])\n value += \"{0},\".format(pess_dc[i])\n value += \"{0},\".format(pess_ch[i])\n value += \"{0},\".format(ress[i])\n value += \"{0},\".format(ess[i])\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg[nmg - 1])\n value += \"{0},\".format(rg_mg[nmg - 1])\n value += \"{0},\".format(iess[nmg - 1])\n value += \"{0},\".format(pess_dc[nmg - 1])\n value += \"{0},\".format(pess_ch[nmg - 1])\n value += \"{0},\".format(ress[nmg - 1])\n value += \"{0}\".format(ess[nmg - 1])\n else:\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg)\n value += \"{0},\".format(rg_mg)\n value += \"{0},\".format(iess)\n value += \"{0},\".format(pess_dc)\n value += \"{0},\".format(pess_ch)\n value += \"{0},\".format(ress)\n value += \"{0}\".format(ess)\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_scenario(self, table_name, scenario=0, weight=0, time=0, nb=1, nmg=2, pd=[0, 0], pd_ac=[0, 0],\n pd_dc=[0, 0], ppv=[0, 0]):\n cursor = self.db.cursor()\n sql_start = 
\"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,WEIGHT,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, weight, time)\n for i in range(nb):\n sql += \"PD{0},\".format(i)\n value += \"{0},\".format(pd[i])\n for i in range(nmg):\n sql += \"PD_AC{0},\".format(i)\n value += \"{0},\".format(pd_ac[i])\n for i in range(nmg):\n sql += \"PD_DC{0},\".format(i)\n value += \"{0},\".format(pd_dc[i])\n for i in range(nmg - 1):\n sql += \"PPV{0},\".format(i)\n value += \"{0},\".format(ppv[i])\n if nmg > 1:\n sql += \"PPV{0}\".format(nmg - 1)\n value += \"{0}\".format(ppv[nmg - 1])\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def inquery_data_scenario(self, table_name, scenario=0, time=0):\n cursor = self.db.cursor()\n # sql = \"SELECT * FROM \" + table_name + \" ;\"\n sql = \"SELECT * FROM \" + table_name + \" WHERE SCENARIO={0} AND TIME={1};\".format(scenario, time)\n cursor.execute(sql)\n data = cursor.fetchall()\n n_data = len(data[0])\n\n temp = []\n for i in range(n_data): temp.append(float(data[0][i]))\n\n cursor.close()\n return temp" }, { "identifier": "ScenarioReduction", "path": "StochasticOptimization/scenario_reduction.py", "snippet": "class ScenarioReduction():\n def __init__(self):\n self.name = \"Scenario reduction\"\n\n def run(self, scenario, weight, n_reduced, power):\n \"\"\"\n\n :param scenario: A fan scenario tree, when more stage are considered, some merge operation can be implemented\n :param weight: Weight of each scenario\n :param n_reduced: Number of scenarios needs to be reduced\n :param power: The power in the distance calculation\n :return:\n \"\"\"\n n_scenario = scenario.shape[0] # number of original scenarios\n c = zeros((n_scenario, n_scenario))\n # Calculate the c matrix\n for i in range(n_scenario):\n for j in range(n_scenario):\n c[i, j] = linalg.norm((scenario[i, :] - scenario[j, :]), 2)\n c[i, j] = max([1, linalg.norm(scenario[i, :], power - 1), linalg.norm(scenario[j, :], power - 1)]) * \\\n c[i, j]\n\n J = arange(n_scenario) # The original index range\n J_reduced = array([])\n # Implement the iteration\n for n in range(n_reduced): # find the minimal distance\n print(\"The reduction is in process {0}\".format(n))\n c_n = inf * ones(n_scenario)\n c_n[J] = 0\n for u in J:\n # Delete the i-th distance\n J_temp = delete(J, where(J == u))\n for k in J_temp:\n c_k_j = delete(c[int(k)], J_temp)\n c_n[int(u)] += weight[int(k)] * min(c_k_j)\n u_i = argmin(c_n)\n J_reduced = append(J_reduced, u_i)\n J = delete(J, where(J == u_i))\n # Optimal redistribution\n p_s = weight.copy()\n p_s[J_reduced.astype(int)] = 0\n\n for i in J_reduced:\n c_temp = c[int(i), :]\n c_temp[J_reduced.astype(int)] = inf\n index = argmin(c_temp)\n p_s[index] += weight[int(i)]\n\n scenario_reduced = scenario[J.astype(int), :]\n weight_reduced = p_s[J.astype(int)]\n\n return scenario_reduced, weight_reduced" } ]
from TestCaseDistributionSystems.test_cases import case33
from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid
from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION
from numpy import zeros, shape, ones, diag, concatenate, eye
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, lil_matrix
from numpy import flatnonzero as find
from numpy import array, tile, arange, random
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A
from pypower.idx_bus import PD, VMAX, VMIN, QD
from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp
from Solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as milp
from copy import deepcopy
from TestCaseDistributionSystems.data_format.idx_MG_PV import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \
    PMESS, EESS, NX_MG, QBIC, QUG, QG, PPV
from TestCaseDistributionSystems.database_management_pv import DataBaseManagement
from StochasticOptimization.scenario_reduction import ScenarioReduction
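The idx_MG_PV constants imported above define a fixed 12-slot layout (NX_MG = 12) for each microgrid and time period, from PG at offset 0 through PMESS at offset 11. The sketch below reads such a block out of a flat solution vector by named offset; the vector x_mg and the 24-period horizon are hypothetical, for illustration only.

```python
# Sketch of the per-period microgrid block implied by idx_MG_PV (PG=0 ... PMESS=11).
from numpy import arange

from TestCaseDistributionSystems.data_format.idx_MG_PV import (
    PG, PUG, PESS_CH, PESS_DC, EESS, PPV, PMESS, NX_MG,
)

T = 24                                    # assumed scheduling horizon
x_mg = arange(T * NX_MG, dtype=float)     # placeholder per-microgrid solution slice

for t in range(T):
    block = x_mg[t * NX_MG:(t + 1) * NX_MG]
    net_ess = block[PESS_DC] - block[PESS_CH]   # net storage injection in period t
    print(t, block[PG], block[PUG], net_ess, block[EESS], block[PPV], block[PMESS])
```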
12,823
self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formulation(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] 
= model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {}
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: [email protected] @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formulation(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = 
nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {}
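The block-matrix assembly in main() (the nv_index / neq_index / nineq_index offsets, Aeq_full, A_full and the per-scenario Ts, Ws, hs) is the deterministic equivalent of a two-stage stochastic program. Writing x for the first-stage variables and y_s for the variables of scenario s, and noting that each scenario weight is already folded into the second-stage data when second_stage_problem_formualtion is called with weight=weight[i], the model handed to miqcp has the form

$$
\begin{aligned}
\min_{x,\;y_1,\dots,y_{n_s}}\quad & c^{\top}x \;+\; \sum_{s=1}^{n_s} c_s^{\top} y_s\\
\text{s.t.}\quad & A_{\mathrm{eq}}\,x = b_{\mathrm{eq}}, \qquad A\,x \le b,\\
& A_{\mathrm{eq},s}\,y_s = b_{\mathrm{eq},s}, \qquad T_s\,x + W_s\,y_s \le h_s, \qquad s = 1,\dots,n_s .
\end{aligned}
$$

The two lil_matrix loops fill exactly this sparsity pattern: each T_s block occupies the first nv_first_stage columns of A_full, each W_s sits on the block diagonal, and (as the call to miqcp suggests) the q vector together with the per-scenario Qc / rc entries passes the quadratic objective and constraint data through to the solver.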
db_management = DataBaseManagement()
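The line above instantiates the DataBaseManagement helper shown earlier with its default MySQL connection (localhost / ems / mess_pv). Below is a minimal sketch of how that object is then driven, using only method signatures that appear in this record; the table name and the numeric values are illustrative, and a reachable MySQL server with those credentials is assumed.

```python
from TestCaseDistributionSystems.database_management_pv import DataBaseManagement

db_management = DataBaseManagement()   # defaults: host="localhost", user="ems", db="mess_pv"

# Drop and re-create the microgrid result table, sized for the 33-bus case.
db_management.create_table("micro_grids", nl=32, nb=33, ng=6, nmg=3)

# Store one illustrative operating point for scenario 0, microgrid 0, period 0.
db_management.insert_data_mg("micro_grids", scenario=0, time=0, mg=0,
                             pg=1.5, qg=0.2, pug=0.5, qug=0.1,
                             pbic_ac2dc=0.0, pbic_dc2ac=0.3, qbic=0.0,
                             pess_ch=0.0, pess_dc=0.4, eess=12.0,
                             ppv=0.8, pmess=0.0)
```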
15
2023-11-27 15:57:53+00:00
16k
andryyy/ehlocomputer
models/listeners.py
[ { "identifier": "defaults", "path": "config/defaults.py", "snippet": "ACCEPT_LANGUAGES = [\"en\", \"de\"]\nMAX_HISTORIC_REVISIONS = 5\nWEBAUTHN_CHALLENGE_TIMEOUT = 30 # seconds\nPROXY_AUTH_TIMEOUT = 300 # seconds\nTABLE_PAGE_SIZE = 10\nTINYDB = {\n \"storage\": RedisLockMiddleware(JSONStorage),\n \"sort_keys\": True,\n \"indent\": 2,\n}\nPODMAN_BINARY = \"/usr/bin/podman\"\nTRUSTED_PROXIES = [\"127.0.0.1\", \"::1\"]" }, { "identifier": "lego", "path": "config/lego.py", "snippet": "DNS_PROVIDERS = {\n \"allinkl\": [\n {\"ALL_INKL_LOGIN\": \"KAS login\"},\n {\"ALL_INKL_PASSWORD\": \"KAS password\"},\n {\"ALL_INKL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ALL_INKL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ALL_INKL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"arvancloud\": [\n {\"ARVANCLOUD_API_KEY\": \"API key\"},\n {\"ARVANCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ARVANCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ARVANCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ARVANCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"autodns\": [\n {\"AUTODNS_API_PASSWORD\": \"User Password\"},\n {\"AUTODNS_API_USER\": \"Username\"},\n {\"AUTODNS_CONTEXT\": \"API context (4 for production, 1 for testing. Defaults to 4)\"},\n {\"AUTODNS_HTTP_TIMEOUT\": \"API request timeout, defaults to 30 seconds\"},\n {\"AUTODNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"AUTODNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"AUTODNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"azure\": [\n {\"AZURE_CLIENT_ID\": \"Client ID\"},\n {\"AZURE_CLIENT_SECRET\": \"Client secret\"},\n {\"AZURE_ENVIRONMENT\": \"Azure environment, one of: public, usgovernment, and china\"},\n {\"AZURE_RESOURCE_GROUP\": \"DNS zone resource group\"},\n {\"AZURE_SUBSCRIPTION_ID\": \"DNS zone subscription ID\"},\n {\"AZURE_TENANT_ID\": \"Tenant ID\"},\n {\"AZURE_METADATA_ENDPOINT\": \"Metadata Service endpoint URL\"},\n {\"AZURE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"AZURE_PRIVATE_ZONE\": \"Set to true to use Azure Private DNS Zones and not public\"},\n {\"AZURE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"AZURE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"AZURE_ZONE_NAME\": \"Zone name to use inside Azure DNS service to add the TXT record in\"},\n ],\n \"bindman\": [\n {\"BINDMAN_MANAGER_ADDRESS\": \"The server URL, should have scheme, hostname, and port (if required) of the Bindman-DNS Manager server\"},\n {\"BINDMAN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BINDMAN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BINDMAN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"bluecat\": [\n {\"BLUECAT_CONFIG_NAME\": \"Configuration name\"},\n {\"BLUECAT_DNS_VIEW\": \"External DNS View Name\"},\n {\"BLUECAT_PASSWORD\": \"API password\"},\n {\"BLUECAT_SERVER_URL\": \"The server URL, should have scheme, hostname, and port (if required) of the authoritative Bluecat BAM serve\"},\n {\"BLUECAT_USER_NAME\": \"API username\"},\n {\"BLUECAT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BLUECAT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BLUECAT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BLUECAT_TTL\": \"The TTL of the TXT record used 
for the DNS challenge\"},\n ],\n \"brandit\": [\n {\"BRANDIT_API_KEY\": \"The API key\"},\n {\"BRANDIT_API_USERNAME\": \"The API username\"},\n {\"BRANDIT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BRANDIT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BRANDIT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BRANDIT_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"bunny\": [\n {\"BUNNY_API_KEY\": \"API key\"},\n {\"BUNNY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BUNNY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BUNNY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"checkdomain\": [\n {\"CHECKDOMAIN_TOKEN\": \"API token\"},\n {\"CHECKDOMAIN_HTTP_TIMEOUT\": \"API request timeout, defaults to 30 seconds\"},\n {\"CHECKDOMAIN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CHECKDOMAIN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CHECKDOMAIN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"civo\": [\n {\"CIVO_TOKEN\": \"Authentication token\"},\n {\"CIVO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CIVO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CIVO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"clouddns\": [\n {\"CLOUDDNS_CLIENT_ID\": \"Client ID\"},\n {\"CLOUDDNS_EMAIL\": \"Account email\"},\n {\"CLOUDDNS_PASSWORD\": \"Account password\"},\n {\"CLOUDDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudflare\": [\n {\"CLOUDFLARE_API_KEY\": \"Alias to CF_API_KEY\"},\n {\"CLOUDFLARE_DNS_API_TOKEN\": \"Alias to CF_DNS_API_TOKEN\"},\n {\"CLOUDFLARE_EMAIL\": \"Alias to CF_API_EMAIL\"},\n {\"CLOUDFLARE_ZONE_API_TOKEN\": \"Alias to CF_ZONE_API_TOKEN\"},\n {\"CLOUDFLARE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDFLARE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDFLARE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDFLARE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudns\": [\n {\"CLOUDNS_AUTH_ID\": \"The API user ID\"},\n {\"CLOUDNS_AUTH_PASSWORD\": \"The password for API user ID\"},\n {\"CLOUDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDNS_SUB_AUTH_ID\": \"The API sub user ID\"},\n {\"CLOUDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudru\": [\n {\"CLOUDRU_KEY_ID\": \"Key ID (login)\"},\n {\"CLOUDRU_SECRET\": \"Key Secret\"},\n {\"CLOUDRU_SERVICE_INSTANCE_ID\": \"Service Instance ID (parentId)\"},\n {\"CLOUDRU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDRU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDRU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDRU_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"CLOUDRU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudxns\": [\n {\"CLOUDXNS_API_KEY\": \"The API key\"},\n {\"CLOUDXNS_SECRET_KEY\": \"The API 
secret key\"},\n {\"CLOUDXNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDXNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDXNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDXNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"conoha\": [\n {\"CONOHA_API_PASSWORD\": \"The API password\"},\n {\"CONOHA_API_USERNAME\": \"The API username\"},\n {\"CONOHA_TENANT_ID\": \"Tenant ID\"},\n {\"CONOHA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CONOHA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CONOHA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CONOHA_REGION\": \"The region\"},\n {\"CONOHA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"constellix\": [\n {\"CONSTELLIX_API_KEY\": \"User API key\"},\n {\"CONSTELLIX_SECRET_KEY\": \"User secret key\"},\n {\"CONSTELLIX_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CONSTELLIX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CONSTELLIX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CONSTELLIX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"derak\": [\n {\"DERAK_API_KEY\": \"The API key\"},\n {\"DERAK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DERAK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DERAK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DERAK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"DERAK_WEBSITE_ID\": \"Force the zone/website ID\"},\n ],\n \"desec\": [\n {\"DESEC_TOKEN\": \"Domain token\"},\n {\"DESEC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DESEC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DESEC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DESEC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"designate\": [\n {\"DESIGNATE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DESIGNATE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DESIGNATE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnshomede\": [\n {\"DNSHOMEDE_CREDENTIALS\": \"Comma-separated list of domain:password credential pairs\"}\n ],\n \"dnsimple\": [\n {\"DNSIMPLE_OAUTH_TOKEN\": \"OAuth token\"},\n {\"DNSIMPLE_BASE_URL\": \"API endpoint URL\"},\n {\"DNSIMPLE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSIMPLE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSIMPLE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnsmadeeasy\": [\n {\"DNSMADEEASY_API_KEY\": \"The API key\"},\n {\"DNSMADEEASY_API_SECRET\": \"The API Secret key\"},\n {\"DNSMADEEASY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DNSMADEEASY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSMADEEASY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSMADEEASY_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"DNSMADEEASY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnspod\": [\n {\"DNSPOD_API_KEY\": \"The user token\"},\n {\"DNSPOD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DNSPOD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSPOD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSPOD_TTL\": \"The TTL of the TXT record used for the DNS 
challenge\"},\n ],\n \"dode\": [\n {\"DODE_TOKEN\": \"API token\"},\n {\"DODE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DODE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DODE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DODE_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"DODE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"domeneshop\": [\n {\"DOMENESHOP_API_SECRET\": \"API secret\"},\n {\"DOMENESHOP_API_TOKEN\": \"API token\"},\n {\"DOMENESHOP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DOMENESHOP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DOMENESHOP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"dreamhost\": [\n {\"DREAMHOST_API_KEY\": \"The API key\"},\n {\"DREAMHOST_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DREAMHOST_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DREAMHOST_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DREAMHOST_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"duckdns\": [\n {\"DUCKDNS_TOKEN\": \"Account token\"},\n {\"DUCKDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DUCKDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DUCKDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DUCKDNS_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"DUCKDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dyn\": [\n {\"DYN_CUSTOMER_NAME\": \"Customer name\"},\n {\"DYN_PASSWORD\": \"Password\"},\n {\"DYN_USER_NAME\": \"User name\"},\n {\"DYN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DYN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DYN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DYN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dynu\": [\n {\"DYNU_API_KEY\": \"API key\"},\n {\"DYNU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DYNU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DYNU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DYNU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"easydns\": [\n {\"EASYDNS_KEY\": \"API Key\"},\n {\"EASYDNS_TOKEN\": \"API Token\"},\n {\"EASYDNS_ENDPOINT\": \"The endpoint URL of the API Server\"},\n {\"EASYDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EASYDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EASYDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EASYDNS_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"EASYDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"efficientip\": [\n {\"EFFICIENTIP_DNS_NAME\": \"DNS name (ex: dns.smart)\"},\n {\"EFFICIENTIP_HOSTNAME\": \"Hostname (ex: foo.example.com)\"},\n {\"EFFICIENTIP_PASSWORD\": \"Password\"},\n {\"EFFICIENTIP_USERNAME\": \"Username\"},\n {\"EFFICIENTIP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EFFICIENTIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EFFICIENTIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EFFICIENTIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"EFFICIENTIP_VIEW_NAME\": \"View name (ex: external)\"},\n ],\n \"epik\": [\n {\"EPIK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EPIK_POLLING_INTERVAL\": \"Time between DNS 
propagation check\"},\n {\"EPIK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EPIK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"exoscale\": [\n {\"EXOSCALE_API_KEY\": \"API key\"},\n {\"EXOSCALE_API_SECRET\": \"API secret\"},\n {\"EXOSCALE_API_ZONE\": \"API zone\"},\n {\"EXOSCALE_ENDPOINT\": \"API endpoint URL\"},\n {\"EXOSCALE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EXOSCALE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EXOSCALE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EXOSCALE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"freemyip\": [\n {\"FREEMYIP_TOKEN\": \"Account token\"},\n {\"FREEMYIP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"FREEMYIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"FREEMYIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"FREEMYIP_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"FREEMYIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gandi\": [\n {\"GANDI_API_KEY\": \"API key\"},\n {\"GANDI_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GANDI_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GANDI_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GANDI_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gandiv5\": [\n {\"GANDIV5_API_KEY\": \"API key\"},\n {\"GANDIV5_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GANDIV5_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GANDIV5_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GANDIV5_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gcore\": [\n {\"GCORE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GCORE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GCORE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GCORE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"glesys\": [\n {\"GLESYS_API_KEY\": \"API key\"},\n {\"GLESYS_API_USER\": \"API user\"},\n {\"GLESYS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GLESYS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GLESYS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GLESYS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"godaddy\": [\n {\"GODADDY_API_KEY\": \"API key\"},\n {\"GODADDY_API_SECRET\": \"API secret\"},\n {\"GODADDY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GODADDY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GODADDY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GODADDY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"hetzner\": [\n {\"HETZNER_API_KEY\": \"API key\"},\n {\"HETZNER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HETZNER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HETZNER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HETZNER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"hostingde\": [\n {\"HOSTINGDE_API_KEY\": \"API key\"},\n {\"HOSTINGDE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HOSTINGDE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HOSTINGDE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HOSTINGDE_TTL\": \"The TTL of the TXT 
record used for the DNS challenge\"},\n {\"HOSTINGDE_ZONE_NAME\": \"Zone name in ACE format\"},\n ],\n \"hosttech\": [\n {\"HOSTTECH_API_KEY\": \"API login\"},\n {\"HOSTTECH_PASSWORD\": \"API password\"},\n {\"HOSTTECH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HOSTTECH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HOSTTECH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HOSTTECH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"httpreq\": [\n {\"HTTPREQ_ENDPOINT\": \"The URL of the server\"},\n {\"HTTPREQ_MODE\": \"'RAW', none\"},\n {\"HTTPREQ_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HTTPREQ_PASSWORD\": \"Basic authentication password\"},\n {\"HTTPREQ_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HTTPREQ_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HTTPREQ_USERNAME\": \"Basic authentication username\"},\n ],\n \"hurricane\": [\n {\"HURRICANE_TOKENS\": \"TXT record names and tokens\"}\n ],\n \"hyperone\": [\n {\"HYPERONE_LOCATION_ID\": \"Specifies location (region) to be used in API calls. (default pl-waw-1)\"},\n {\"HYPERONE_PASSPORT_LOCATION\": \"Allows to pass custom passport file location (default ~/.h1/passport.json)\"},\n {\"HYPERONE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HYPERONE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HYPERONE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"iij\": [\n {\"IIJ_API_ACCESS_KEY\": \"API access key\"},\n {\"IIJ_API_SECRET_KEY\": \"API secret key\"},\n {\"IIJ_DO_SERVICE_CODE\": \"DO service code\"},\n {\"IIJ_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IIJ_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IIJ_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"IIJ_DPF_API_TOKEN\": \"API token\"},\n {\"IIJ_DPF_DPM_SERVICE_CODE\": \"IIJ Managed DNS Service's service code\"},\n {\"IIJ_DPF_POLLING_INTERVAL\": \"Time between DNS propagation check, defaults to 5 second\"},\n {\"IIJ_DPF_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation, defaults to 660 second\"},\n {\"IIJ_DPF_TTL\": \"The TTL of the TXT record used for the DNS challenge, default to 300\"},\n ],\n \"infoblox\": [\n {\"INFOBLOX_HOST\": \"Host URI\"},\n {\"INFOBLOX_PASSWORD\": \"Account Password\"},\n {\"INFOBLOX_USERNAME\": \"Account Username\"},\n {\"INFOBLOX_DNS_VIEW\": \"The view for the TXT records, default: External\"},\n {\"INFOBLOX_HTTP_TIMEOUT\": \"HTTP request timeout\"},\n {\"INFOBLOX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INFOBLOX_PORT\": \"The port for the infoblox grid manager, default: 443\"},\n {\"INFOBLOX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"INFOBLOX_SSL_VERIFY\": \"Whether or not to verify the TLS certificate, default: true\"},\n {\"INFOBLOX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"INFOBLOX_WAPI_VERSION\": \"The version of WAPI being used, default: 2.11\"},\n ],\n \"infomaniak\": [\n {\"INFOMANIAK_ACCESS_TOKEN\": \"Access token\"},\n {\"INFOMANIAK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"INFOMANIAK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INFOMANIAK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"INFOMANIAK_TTL\": \"The TTL of the TXT record used for the DNS challenge in seconds\"},\n ],\n \"inwx\": [\n {\"INWX_PASSWORD\": 
\"Password\"},\n {\"INWX_USERNAME\": \"Username\"},\n {\"INWX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INWX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation (default 360s)\"},\n {\"INWX_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"INWX_SHARED_SECRET\": \"shared secret related to 2FA\"},\n {\"INWX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ionos\": [\n {\"IONOS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IONOS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IONOS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IONOS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ipv64\": [\n {\"IPV64_API_KEY\": \"Account API Key\"},\n {\"IPV64_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IPV64_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IPV64_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IPV64_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"IPV64_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"iwantmyname\": [\n {\"IWANTMYNAME_PASSWORD\": \"API password\"},\n {\"IWANTMYNAME_USERNAME\": \"API username\"},\n {\"IWANTMYNAME_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IWANTMYNAME_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IWANTMYNAME_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IWANTMYNAME_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"joker\": [\n {\"JOKER_API_KEY\": \"API key (only with DMAPI mode)\"},\n {\"JOKER_API_MODE\": \"'DMAPI' or 'SVC'. DMAPI is for resellers accounts. (Default: DMAPI)\"},\n {\"JOKER_PASSWORD\": \"Joker.com password\"},\n {\"JOKER_USERNAME\": \"Joker.com username\"},\n {\"JOKER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"JOKER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"JOKER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"JOKER_SEQUENCE_INTERVAL\": \"Time between sequential requests (only with 'SVC' mode)\"},\n {\"JOKER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"liara\": [\n {\"LIARA_API_KEY\": \"The API key\"},\n {\"LIARA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LIARA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LIARA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LIARA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"lightsail\": [\n {\"LIGHTSAIL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LIGHTSAIL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"linode\": [\n {\"LINODE_TOKEN\": \"API token\"},\n {\"LINODE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LINODE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LINODE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LINODE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"loopia\": [\n {\"LOOPIA_API_PASSWORD\": \"API password\"},\n {\"LOOPIA_API_USER\": \"API username\"},\n {\"LOOPIA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LOOPIA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LOOPIA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LOOPIA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"luadns\": [\n {\"LUADNS_API_TOKEN\": \"API token\"},\n 
{\"LUADNS_API_USERNAME\": \"Username (your email)\"},\n {\"LUADNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LUADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LUADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LUADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"metaname\": [\n {\"METANAME_ACCOUNT_REFERENCE\": \"The four-digit reference of a Metaname account\"},\n {\"METANAME_API_KEY\": \"API Key\"},\n {\"METANAME_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"METANAME_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"METANAME_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"mydnsjp\": [\n {\"MYDNSJP_MASTER_ID\": \"Master ID\"},\n {\"MYDNSJP_PASSWORD\": \"Password\"},\n {\"MYDNSJP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"MYDNSJP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"MYDNSJP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"MYDNSJP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"mythicbeasts\": [\n {\"MYTHICBEASTS_PASSWORD\": \"Password\"},\n {\"MYTHICBEASTS_USERNAME\": \"User name\"},\n {\"MYTHICBEASTS_API_ENDPOINT\": \"The endpoint for the API (must implement v2)\"},\n {\"MYTHICBEASTS_AUTH_API_ENDPOINT\": \"The endpoint for Mythic Beasts' Authentication\"},\n {\"MYTHICBEASTS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"MYTHICBEASTS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"MYTHICBEASTS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"MYTHICBEASTS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"namecheap\": [\n {\"NAMECHEAP_API_KEY\": \"API key\"},\n {\"NAMECHEAP_API_USER\": \"API user\"},\n {\"NAMECHEAP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NAMECHEAP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NAMECHEAP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NAMECHEAP_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"NAMECHEAP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"namesilo\": [\n {\"NAMESILO_API_KEY\": \"Client ID\"},\n {\"NAMESILO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NAMESILO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation, it is better to set larger than 15m\"},\n {\"NAMESILO_TTL\": \"The TTL of the TXT record used for the DNS challenge, should be in [3600, 2592000]\"},\n ],\n \"nearlyfreespeech\": [\n {\"NEARLYFREESPEECH_API_KEY\": \"API Key for API requests\"},\n {\"NEARLYFREESPEECH_LOGIN\": \"Username for API requests\"},\n {\"NEARLYFREESPEECH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NEARLYFREESPEECH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NEARLYFREESPEECH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NEARLYFREESPEECH_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"NEARLYFREESPEECH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"netcup\": [\n {\"NETCUP_API_KEY\": \"API key\"},\n {\"NETCUP_API_PASSWORD\": \"API password\"},\n {\"NETCUP_CUSTOMER_NUMBER\": \"Customer number\"},\n {\"NETCUP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NETCUP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NETCUP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NETCUP_TTL\": \"The TTL of the TXT record used 
for the DNS challenge\"},\n ],\n \"netlify\": [\n {\"NETLIFY_TOKEN\": \"Token\"},\n {\"NETLIFY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NETLIFY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NETLIFY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NETLIFY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nicmanager\": [\n {\"NICMANAGER_API_EMAIL\": \"Email-based login\"},\n {\"NICMANAGER_API_LOGIN\": \"Login, used for Username-based login\"},\n {\"NICMANAGER_API_PASSWORD\": \"Password, always required\"},\n {\"NICMANAGER_API_USERNAME\": \"Username, used for Username-based login\"},\n {\"NICMANAGER_API_MODE\": \"mode: 'anycast' or 'zone' (default: 'anycast')\"},\n {\"NICMANAGER_API_OTP\": \"TOTP Secret (optional)\"},\n {\"NICMANAGER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NICMANAGER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NICMANAGER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NICMANAGER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nifcloud\": [\n {\"NIFCLOUD_ACCESS_KEY_ID\": \"Access key\"},\n {\"NIFCLOUD_SECRET_ACCESS_KEY\": \"Secret access key\"},\n {\"NIFCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NIFCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NIFCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NIFCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"njalla\": [\n {\"NJALLA_TOKEN\": \"API token\"},\n {\"NJALLA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NJALLA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NJALLA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NJALLA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nodion\": [\n {\"NODION_API_TOKEN\": \"The API token\"},\n {\"NODION_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NODION_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NODION_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NODION_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ns1\": [\n {\"NS1_API_KEY\": \"API key\"},\n {\"NS1_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NS1_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NS1_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NS1_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"otc\": [\n {\"OTC_DOMAIN_NAME\": \"Domain name\"},\n {\"OTC_IDENTITY_ENDPOINT\": \"Identity endpoint URL\"},\n {\"OTC_PASSWORD\": \"Password\"},\n {\"OTC_PROJECT_NAME\": \"Project name\"},\n {\"OTC_USER_NAME\": \"User name\"},\n {\"OTC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"OTC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"OTC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"OTC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ovh\": [\n {\"OVH_APPLICATION_KEY\": \"Application key\"},\n {\"OVH_APPLICATION_SECRET\": \"Application secret\"},\n {\"OVH_CONSUMER_KEY\": \"Consumer key\"},\n {\"OVH_ENDPOINT\": \"Endpoint URL (ovh-eu or ovh-ca)\"},\n {\"OVH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"OVH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"OVH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"OVH_TTL\": \"The TTL of the TXT record used for the DNS 
challenge\"},\n ],\n \"pdns\": [\n {\"PDNS_API_KEY\": \"API key\"},\n {\"PDNS_API_URL\": \"API URL\"},\n {\"PDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PDNS_SERVER_NAME\": \"Name of the server in the URL, 'localhost' by default\"},\n {\"PDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"plesk\": [\n {\"PLESK_PASSWORD\": \"API password\"},\n {\"PLESK_USERNAME\": \"API username\"},\n {\"PLESK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PLESK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PLESK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PLESK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"porkbun\": [\n {\"PORKBUN_API_KEY\": \"API key\"},\n {\"PORKBUN_SECRET_API_KEY\": \"secret API key\"},\n {\"PORKBUN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PORKBUN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PORKBUN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PORKBUN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rackspace\": [\n {\"RACKSPACE_API_KEY\": \"API key\"},\n {\"RACKSPACE_USER\": \"API user\"},\n {\"RACKSPACE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RACKSPACE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RACKSPACE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RACKSPACE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rcodezero\": [\n {\"RCODEZERO_API_TOKEN\": \"API token\"},\n {\"RCODEZERO_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RCODEZERO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RCODEZERO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RCODEZERO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"regru\": [\n {\"REGRU_PASSWORD\": \"API password\"},\n {\"REGRU_USERNAME\": \"API username\"},\n {\"REGRU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"REGRU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"REGRU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"REGRU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rfc2136\": [\n {\"RFC2136_NAMESERVER\": \"Network address in the form 'host' or 'host:port'\"},\n {\"RFC2136_TSIG_KEY\": \"Name of the secret key as defined in DNS server configuration. To disable TSIG authentication, leave the 'RFC2136_TSIG*' variables unset.\"},\n {\"RFC2136_TSIG_SECRET\": \"Secret key payload. 
To disable TSIG authentication, leave the' RFC2136_TSIG*' variables unset.\"},\n {\"RFC2136_DNS_TIMEOUT\": \"API request timeout\"},\n {\"RFC2136_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RFC2136_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RFC2136_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"RFC2136_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rimuhosting\": [\n {\"RIMUHOSTING_API_KEY\": \"User API key\"},\n {\"RIMUHOSTING_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RIMUHOSTING_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RIMUHOSTING_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RIMUHOSTING_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"safedns\": [\n {\"SAFEDNS_AUTH_TOKEN\": \"Authentication token\"},\n {\"SAFEDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SAFEDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SAFEDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SAFEDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"sakuracloud\": [\n {\"SAKURACLOUD_ACCESS_TOKEN\": \"Access token\"},\n {\"SAKURACLOUD_ACCESS_TOKEN_SECRET\": \"Access token secret\"},\n {\"SAKURACLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SAKURACLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SAKURACLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SAKURACLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"scaleway\": [\n {\"SCALEWAY_API_TOKEN\": \"API token\"},\n {\"SCALEWAY_PROJECT_ID\": \"Project to use (optional)\"},\n {\"SCALEWAY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SCALEWAY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SCALEWAY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"selectel\": [\n {\"SELECTEL_API_TOKEN\": \"API token\"},\n {\"SELECTEL_BASE_URL\": \"API endpoint URL\"},\n {\"SELECTEL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SELECTEL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SELECTEL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SELECTEL_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"servercow\": [\n {\"SERVERCOW_PASSWORD\": \"API password\"},\n {\"SERVERCOW_USERNAME\": \"API username\"},\n {\"SERVERCOW_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SERVERCOW_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SERVERCOW_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SERVERCOW_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"simply\": [\n {\"SIMPLY_ACCOUNT_NAME\": \"Account name\"},\n {\"SIMPLY_API_KEY\": \"API key\"},\n {\"SIMPLY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SIMPLY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SIMPLY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SIMPLY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"sonic\": [\n {\"SONIC_API_KEY\": \"API Key\"},\n {\"SONIC_USER_ID\": \"User ID\"},\n {\"SONIC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SONIC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SONIC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SONIC_SEQUENCE_INTERVAL\": 
\"Time between sequential requests\"},\n {\"SONIC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"stackpath\": [\n {\"STACKPATH_CLIENT_ID\": \"Client ID\"},\n {\"STACKPATH_CLIENT_SECRET\": \"Client secret\"},\n {\"STACKPATH_STACK_ID\": \"Stack ID\"},\n {\"STACKPATH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"STACKPATH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"STACKPATH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"tencentcloud\": [\n {\"TENCENTCLOUD_SECRET_ID\": \"Access key ID\"},\n {\"TENCENTCLOUD_SECRET_KEY\": \"Access Key secret\"},\n {\"TENCENTCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"TENCENTCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"TENCENTCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"TENCENTCLOUD_REGION\": \"Region\"},\n {\"TENCENTCLOUD_SESSION_TOKEN\": \"Access Key token\"},\n {\"TENCENTCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"transip\": [\n {\"TRANSIP_ACCOUNT_NAME\": \"Account name\"},\n {\"TRANSIP_PRIVATE_KEY_PATH\": \"Private key path\"},\n {\"TRANSIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"TRANSIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"TRANSIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ultradns\": [\n {\"ULTRADNS_PASSWORD\": \"API Password\"},\n {\"ULTRADNS_USERNAME\": \"API Username\"},\n {\"ULTRADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ULTRADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ULTRADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vegadns\": [\n {\"VEGADNS_URL\": \"API endpoint URL\"},\n {\"VEGADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VEGADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VEGADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vercel\": [\n {\"VERCEL_API_TOKEN\": \"Authentication token\"},\n {\"VERCEL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VERCEL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VERCEL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VERCEL_TEAM_ID\": \"Team ID (ex: team_xxxxxxxxxxxxxxxxxxxxxxxx)\"},\n {\"VERCEL_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"versio\": [\n {\"VERSIO_PASSWORD\": \"Basic authentication password\"},\n {\"VERSIO_USERNAME\": \"Basic authentication username\"},\n {\"VERSIO_ENDPOINT\": \"The endpoint URL of the API Server\"},\n {\"VERSIO_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VERSIO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VERSIO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VERSIO_SEQUENCE_INTERVAL\": \"Time between sequential requests, default 60s\"},\n {\"VERSIO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vinyldns\": [\n {\"VINYLDNS_ACCESS_KEY\": \"The VinylDNS API key\"},\n {\"VINYLDNS_HOST\": \"The VinylDNS API URL\"},\n {\"VINYLDNS_SECRET_KEY\": \"The VinylDNS API Secret key\"},\n {\"VINYLDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VINYLDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VINYLDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vscale\": [\n 
{\"VSCALE_API_TOKEN\": \"API token\"},\n {\"VSCALE_BASE_URL\": \"API endpoint URL\"},\n {\"VSCALE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VSCALE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VSCALE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VSCALE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vultr\": [\n {\"VULTR_API_KEY\": \"API key\"},\n {\"VULTR_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VULTR_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VULTR_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VULTR_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"websupport\": [\n {\"WEBSUPPORT_API_KEY\": \"API key\"},\n {\"WEBSUPPORT_SECRET\": \"API secret\"},\n {\"WEBSUPPORT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"WEBSUPPORT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"WEBSUPPORT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"WEBSUPPORT_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"WEBSUPPORT_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"wedos\": [\n {\"WEDOS_USERNAME\": \"Username is the same as for the admin account\"},\n {\"WEDOS_WAPI_PASSWORD\": \"Password needs to be generated and IP allowed in the admin interface\"},\n {\"WEDOS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"WEDOS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"WEDOS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"WEDOS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"zoneee\": [\n {\"ZONEEE_API_KEY\": \"API key\"},\n {\"ZONEEE_API_USER\": \"API user\"},\n {\"ZONEEE_ENDPOINT\": \"API endpoint URL\"},\n {\"ZONEEE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ZONEEE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ZONEEE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ZONEEE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"zonomi\": [\n {\"ZONOMI_API_KEY\": \"User API key\"},\n {\"ZONOMI_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ZONOMI_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ZONOMI_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ZONOMI_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n}" }, { "identifier": "utc_now_as_str", "path": "utils/helpers.py", "snippet": "def utc_now_as_str():\n return datetime.now(timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%S%z\")" }, { "identifier": "ensure_list", "path": "utils/helpers.py", "snippet": "@validate_call\ndef ensure_list(s: str | list[str] | None) -> list:\n if s:\n if isinstance(s, str):\n return [s]\n if isinstance(s, list):\n return s\n return []" }, { "identifier": "to_unique_sorted_str_list", "path": "utils/helpers.py", "snippet": "@validate_call\ndef to_unique_sorted_str_list(l: list[str]) -> list:\n _l = [x for x in set(l) if x != \"\"]\n return sorted(_l, key=lambda x: str(x))" }, { "identifier": "get_validated_fqdn", "path": "utils/helpers.py", "snippet": "@validate_call\ndef get_validated_fqdn(hostname: str) -> str:\n regex = re.compile(\n r\"^((?![-])[-A-Z\\d]{1,63}(?<!-)[.])*(?!-)[-A-Z\\d]{1,63}(?<!-)?$\", re.IGNORECASE\n )\n if len(hostname) > 253:\n raise ValueError(f\"{hostname} is too long\")\n if regex.match(hostname):\n return hostname\n else:\n raise ValueError(f\"{hostname} is not a valid 
FQDN\")" }, { "identifier": "flatten", "path": "utils/helpers.py", "snippet": "@validate_call\ndef flatten(l: list[list]):\n return [i for sub_list in l for i in sub_list]" } ]
import json import os import re import uuid from config import defaults from config import lego from config.database import * from email_validator import validate_email from pydantic import ( AfterValidator, BaseModel, EmailStr, Field, FilePath, HttpUrl, field_validator, model_validator, validator, ) from pydantic.networks import IPv4Address, IPv6Address from typing import Annotated, Any, Literal from . import ( utc_now_as_str, ensure_list, to_unique_sorted_str_list, get_validated_fqdn, flatten, )
12,450
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = []
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = []
created: Annotated[str, Field(default_factory=utc_now_as_str)]
2
2023-12-01 08:36:45+00:00
16k
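Note: a minimal sketch (not part of the sample above, and assuming Pydantic v2 as implied by the record's imports) of how the gold next_line completes the ListenerCreate model in this record. The utc_now_as_str body mirrors the helper quoted in the record's context list; the instantiation at the end is a hypothetical usage added for illustration.

import uuid
from datetime import datetime, timezone
from typing import Annotated
from pydantic import BaseModel, Field

def utc_now_as_str() -> str:
    # Same behaviour as the context helper: a UTC timestamp string such as "2023-12-01T08:36:45+0000".
    return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S%z")

class ListenerCreate(BaseModel):
    id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))]
    name: Annotated[str, Field(min_length=1)]
    configuration: dict = {}
    historic: list = []
    created: Annotated[str, Field(default_factory=utc_now_as_str)]  # the gold next_line

listener = ListenerCreate(name="example")   # hypothetical usage, not from the dataset
print(listener.id, listener.created)        # a fresh UUID4 and the creation timestamp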
fzmi/ubdd
models/dino/models/dino/dino.py
[ { "identifier": "box_ops", "path": "models/dino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "models/dino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }" }, { "identifier": "nested_tensor_from_tensor_list", "path": "models/dino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "models/dino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, 
target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "models/dino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "models/dino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "models/dino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "models/dino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" }, { "identifier": "build_backbone", "path": "models/dino/models/dino/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone: \n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords: \n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n backbone_freeze_keywords = args.backbone_freeze_keywords\n use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n if args.backbone in ['resnet50', 'resnet101']:\n backbone = Backbone(args.backbone, train_backbone, args.dilation, \n return_interm_indices, \n batch_norm=FrozenBatchNorm2d)\n bb_num_channels = backbone.num_channels\n elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n pretrain_img_size = int(args.backbone.split('_')[-2])\n backbone = build_swin_transformer(args.backbone, \\\n pretrain_img_size=pretrain_img_size, \\\n out_indices=tuple(return_interm_indices), \\\n dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n # freeze some layers\n if backbone_freeze_keywords is not None:\n for name, parameter in backbone.named_parameters():\n for keyword in backbone_freeze_keywords:\n if keyword in name:\n 
parameter.requires_grad_(False)\n break\n if \"backbone_dir\" in args:\n pretrained_dir = args.backbone_dir\n PTDICT = {\n 'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n 'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n 'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n }\n pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n from collections import OrderedDict\n def key_select_function(keyname):\n if 'head' in keyname:\n return False\n if args.dilation and 'layers.3' in keyname:\n return False\n return True\n _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n print(str(_tmp_st_output))\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n elif args.backbone in ['convnext_xlarge_22k']:\n backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n \n\n assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels \n assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n return model" }, { "identifier": "build_matcher", "path": "models/dino/models/dino/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" }, { "identifier": "DETRsegm", "path": "models/dino/models/dino/segmentation.py", "snippet": "class DETRsegm(nn.Module):\n def __init__(self, detr, freeze_detr=False):\n super().__init__()\n self.detr = detr\n\n if freeze_detr:\n for p in self.parameters():\n p.requires_grad_(False)\n\n hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n def forward(self, samples: NestedTensor):\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.detr.backbone(samples)\n\n bs = features[-1].tensors.shape[0]\n\n src, mask = features[-1].decompose()\n assert mask is not None\n src_proj = self.detr.input_proj(src)\n hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n outputs_class = self.detr.class_embed(hs)\n outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": 
outputs_coord[-1]}\n if self.detr.aux_loss:\n out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n # FIXME h_boxes takes the last one computed, keep this in mind\n bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n out[\"pred_masks\"] = outputs_seg_masks\n return out" }, { "identifier": "PostProcessPanoptic", "path": "models/dino/models/dino/segmentation.py", "snippet": "class PostProcessPanoptic(nn.Module):\n \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n coco panoptic API \"\"\"\n\n def __init__(self, is_thing_map, threshold=0.85):\n \"\"\"\n Parameters:\n is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether\n the class is a thing (True) or a stuff (False) class\n threshold: confidence threshold: segments with confidence lower than this will be deleted\n \"\"\"\n super().__init__()\n self.threshold = threshold\n self.is_thing_map = is_thing_map\n\n def forward(self, outputs, processed_sizes, target_sizes=None):\n \"\"\" This function computes the panoptic prediction from the model's predictions.\n Parameters:\n outputs: This is a dict coming directly from the model. See the model doc for the content.\n processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n model, ie the size after data augmentation but before batching.\n target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n of each prediction. 
If left to None, it will default to the processed_sizes\n \"\"\"\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n ):\n # we filter empty queries and detection below threshold\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n\n # It may be that we have several predicted masks for the same stuff class.\n # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda: [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n # This helper function creates the final panoptic segmentation image\n # It also returns the area of the masks that appears on the image\n\n m_id = masks.transpose(0, 1).softmax(-1)\n\n if m_id.shape[-1] == 0:\n # We didn't detect any mask :(\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n\n if dedup:\n # Merge the masks corresponding to the same stuff class\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n final_h, final_w = to_tuple(target_size)\n\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n np_seg_img = (\n torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n )\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n # We know filter empty masks as long as we find some\n while True:\n filtered_small = torch.as_tensor(\n [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n )\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n del cur_classes\n\n with io.BytesIO() as out:\n seg_img.save(out, format=\"PNG\")\n 
predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n preds.append(predictions)\n return preds" }, { "identifier": "PostProcessSegm", "path": "models/dino/models/dino/segmentation.py", "snippet": "class PostProcessSegm(nn.Module):\n def __init__(self, threshold=0.5):\n super().__init__()\n self.threshold = threshold\n\n @torch.no_grad()\n def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n assert len(orig_target_sizes) == len(max_target_sizes)\n max_h, max_w = max_target_sizes.max(0)[0].tolist()\n outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n img_h, img_w = t[0], t[1]\n results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n results[i][\"masks\"] = F.interpolate(\n results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n ).byte()\n\n return results" }, { "identifier": "dice_loss", "path": "models/dino/models/dino/segmentation.py", "snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes" }, { "identifier": "build_deformable_transformer", "path": "models/dino/models/dino/deformable_transformer.py", "snippet": "def build_deformable_transformer(args):\n decoder_query_perturber = None\n if args.decoder_layer_noise:\n from .utils import RandomBoxPerturber\n decoder_query_perturber=RandomBoxPerturber(\n x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n use_detached_boxes_dec_out = False\n try:\n use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n except:\n use_detached_boxes_dec_out =False\n\n return DeformableTransformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_unicoder_layers=args.unic_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n modulate_hw_attn=True,\n\n deformable_encoder=True,\n deformable_decoder=True,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n use_deformable_box_attn=args.use_deformable_box_attn,\n box_attn_type=args.box_attn_type,\n\n learnable_tgt_init=True,\n decoder_query_perturber=decoder_query_perturber,\n\n add_channel_attention=args.add_channel_attention,\n add_pos_value=args.add_pos_value,\n random_refpoints_xy=args.random_refpoints_xy,\n\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n 
two_stage_pat_embed=args.two_stage_pat_embed,\n two_stage_add_query_num=args.two_stage_add_query_num,\n two_stage_learn_wh=args.two_stage_learn_wh,\n two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n dec_layer_number=args.dec_layer_number,\n rm_self_attn_layers=None,\n key_aware_type=None,\n layer_share_type=None,\n\n rm_detach=None,\n decoder_sa_type=args.decoder_sa_type,\n module_seq=args.decoder_module_seq,\n\n embed_init_tgt=args.embed_init_tgt,\n use_detached_boxes_dec_out=use_detached_boxes_dec_out\n )" }, { "identifier": "sigmoid_focal_loss", "path": "models/dino/models/dino/utils.py", "snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "MLP", "path": "models/dino/models/dino/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/dino/models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "prepare_for_cdn", "path": "models/dino/models/dino/dn_components.py", "snippet": "def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if training:\n targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n dn_number = 1\n else:\n if dn_number >= 100:\n dn_number = dn_number // (int(max(known_num) * 2))\n elif dn_number < 1:\n 
dn_number = 1\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = torch.cat(known)\n labels = torch.cat([t['labels'] for t in targets])\n boxes = torch.cat([t['boxes'] for t in targets])\n batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob\n new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_pad = int(max(known_num))\n\n pad_size = int(single_pad * 2 * dn_number)\n positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part,\n diff).cuda() * box_noise_scale\n known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to('cuda')\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to('cuda')\n if len(known_num):\n map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]\n map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n if i == dn_number - 1:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n else:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 
1), single_pad * 2 * (i + 1):pad_size] = True\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n dn_meta = {\n 'pad_size': pad_size,\n 'num_dn_group': dn_number,\n }\n else:\n\n input_query_label = None\n input_query_bbox = None\n attn_mask = None\n dn_meta = None\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta" }, { "identifier": "dn_post_process", "path": "models/dino/models/dino/dn_components.py", "snippet": "def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n \"\"\"\n post process of dn after output from the transformer\n put the dn part in the dn_meta\n \"\"\"\n if dn_meta and dn_meta['pad_size'] > 0:\n output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n if aux_loss:\n out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n dn_meta['output_known_lbs_bboxes'] = out\n return outputs_class, outputs_coord" } ]
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from models.dino.util import box_ops from models.dino.util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
10,832
else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) if self.random_refpoints_xy: self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False if self.fix_refpoints_hw > 0: print("fix_refpoints_hw: {}".format(self.fix_refpoints_hw)) assert self.random_refpoints_xy self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:]) self.refpoint_embed.weight.data[:, 2:].requires_grad = False elif int(self.fix_refpoints_hw) == -1: pass elif int(self.fix_refpoints_hw) == -2: print('learn a shared h and w') assert self.random_refpoints_xy self.refpoint_embed = nn.Embedding(use_num_queries, 2) self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False self.hw_embed = nn.Embedding(1, 1) else: raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw)) def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. 
Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) if self.random_refpoints_xy: self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False if self.fix_refpoints_hw > 0: print("fix_refpoints_hw: {}".format(self.fix_refpoints_hw)) assert self.random_refpoints_xy self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:]) self.refpoint_embed.weight.data[:, 2:].requires_grad = False elif int(self.fix_refpoints_hw) == -1: pass elif int(self.fix_refpoints_hw) == -2: print('learn a shared h and w') assert self.random_refpoints_xy self.refpoint_embed = nn.Embedding(use_num_queries, 2) self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, 
:2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False self.hw_embed = nn.Embedding(1, 1) else: raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw)) def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\
prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale),
18
2023-12-04 00:27:58+00:00
16k
girgle/DouZero_For_New_HLDDZ
main.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n self.Interrupt = False\n self.RealRate = (1440, 810)\n self.GetZoomRate()\n for file in os.listdir(\"./pics\"):\n info = file.split(\".\")\n if info[1] == \"png\":\n tmpImage = Image.open(\"./pics/\" + file)\n imgCv = cv2.imread(\"./pics/\" + file)\n self.Pics.update({info[0]: tmpImage})\n self.PicsCV.update({info[0]: imgCv})\n\n def sleep(self, ms):\n self.counter.restart()\n while self.counter.elapsed() < ms:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50)\n\n def Screenshot(self, region=None): # -> (im, (left, top))\n try_count = 3\n success = False\n while try_count > 0 and not success:\n try:\n try_count -= 1\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n hwnd = self.Handle\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n width = right - left\n height = bot - top\n self.RealRate = (width, height)\n width = int(width)\n height = int(height)\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)\n saveDC.SelectObject(saveBitMap)\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 3)\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n im = Image.frombuffer(\n \"RGB\",\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n im = im.resize((1440, 810))\n if region is not None:\n im = im.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))\n if result:\n success = True\n return im, (left, top)\n except Exception as e:\n print(\"截图时出现错误:\", repr(e))\n self.sleep(200)\n return None, (0, 0)\n\n def GetZoomRate(self):\n self.ScreenZoomRate = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100\n\n def LocateOnScreen(self, templateName, region, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n return LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n def ClickOnImage(self, templateName, region=None, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n result = LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n if result is not None:\n self.LeftClick(result)\n print(result)\n\n def LeftClick(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')\n time.sleep(0.1)\n pyautogui.moveTo(int(left + 1000), int(top + 550))\n\n '''win32gui.SetActiveWindow(self.Handle)\n lParam = win32api.MAKELONG(x, y)\n\n win32gui.PostMessage(self.Handle, WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, 
WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, WM_MOUSEMOVE, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONDOWN, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONUP, MK_LBUTTON, lParam)'''\n\n def LeftClick2(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')" }, { "identifier": "get_move_type", "path": "douzero/env/move_detector.py", "snippet": "def get_move_type(move):\n move_size = len(move)\n move_dict = collections.Counter(move)\n\n if move_size == 0:\n return {'type': TYPE_0_PASS}\n\n if move_size == 1:\n return {'type': TYPE_1_SINGLE, 'rank': move[0]}\n\n if move_size == 2:\n if move[0] == move[1]:\n return {'type': TYPE_2_PAIR, 'rank': move[0]}\n elif move == [20, 30]: # Kings\n return {'type': TYPE_5_KING_BOMB}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 3:\n if len(move_dict) == 1:\n return {'type': TYPE_3_TRIPLE, 'rank': move[0]}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 4:\n if len(move_dict) == 1:\n return {'type': TYPE_4_BOMB, 'rank': move[0]}\n elif len(move_dict) == 2:\n if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]:\n return {'type': TYPE_6_3_1, 'rank': move[1]}\n else:\n return {'type': TYPE_15_WRONG}\n else:\n return {'type': TYPE_15_WRONG}\n\n if is_continuous_seq(move):\n return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)}\n\n if move_size == 5:\n if len(move_dict) == 2:\n return {'type': TYPE_7_3_2, 'rank': move[2]}\n else:\n return {'type': TYPE_15_WRONG}\n\n count_dict = collections.defaultdict(int)\n for c, n in move_dict.items():\n count_dict[n] += 1\n\n if move_size == 6:\n if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \\\n (count_dict.get(2) == 1 or count_dict.get(1) == 2):\n return {'type': TYPE_13_4_2, 'rank': move[2]}\n\n if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and\n (count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2):\n return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])}\n\n mdkeys = sorted(move_dict.keys())\n if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys):\n return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys):\n return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n # Check Type 11 (serial 3+1) and Type 12 (serial 3+2)\n if count_dict.get(3, 0) >= MIN_TRIPLES:\n serial_3 = list()\n single = list()\n pair = list()\n\n for k, v in move_dict.items():\n if v == 3:\n serial_3.append(k)\n elif v == 1:\n single.append(k)\n elif v == 2:\n pair.append(k)\n else: # no other possibilities\n return {'type': TYPE_15_WRONG}\n\n serial_3.sort()\n if is_continuous_seq(serial_3):\n if len(serial_3) == len(single)+len(pair)*2:\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)}\n if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2:\n return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)}\n\n if len(serial_3) == 4:\n if is_continuous_seq(serial_3[1:]):\n return {'type': 
TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1}\n if is_continuous_seq(serial_3[:-1]):\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1}\n\n return {'type': TYPE_15_WRONG}" }, { "identifier": "Ui_Form", "path": "MainWindow.py", "snippet": "class Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(677, 450)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(9)\n font.setBold(True)\n font.setItalic(False)\n font.setWeight(75)\n Form.setFont(font)\n Form.setWindowOpacity(0.8)\n self.WinRate = QtWidgets.QLabel(Form)\n self.WinRate.setGeometry(QtCore.QRect(320, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.WinRate.setFont(font)\n self.WinRate.setAlignment(QtCore.Qt.AlignCenter)\n self.WinRate.setObjectName(\"WinRate\")\n self.UserHandCards = QtWidgets.QLabel(Form)\n self.UserHandCards.setGeometry(QtCore.QRect(30, 330, 351, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.UserHandCards.setFont(font)\n self.UserHandCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.UserHandCards.setObjectName(\"UserHandCards\")\n self.ThreeLandlordCards = QtWidgets.QLabel(Form)\n self.ThreeLandlordCards.setGeometry(QtCore.QRect(30, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.ThreeLandlordCards.setFont(font)\n self.ThreeLandlordCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.ThreeLandlordCards.setObjectName(\"ThreeLandlordCards\")\n self.BidWinrate = QtWidgets.QLabel(Form)\n self.BidWinrate.setGeometry(QtCore.QRect(30, 220, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.BidWinrate.setFont(font)\n self.BidWinrate.setObjectName(\"BidWinrate\")\n self.PreWinrate = QtWidgets.QLabel(Form)\n self.PreWinrate.setGeometry(QtCore.QRect(30, 280, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PreWinrate.setFont(font)\n self.PreWinrate.setObjectName(\"PreWinrate\")\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(490, 320, 101, 41))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.LPlayedCard = QtWidgets.QLabel(Form)\n self.LPlayedCard.setGeometry(QtCore.QRect(170, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LPlayedCard.setFont(font)\n self.LPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.LPlayedCard.setObjectName(\"LPlayedCard\")\n self.splitter_2 = QtWidgets.QSplitter(Form)\n self.splitter_2.setGeometry(QtCore.QRect(20, 380, 621, 41))\n self.splitter_2.setOrientation(QtCore.Qt.Horizontal)\n self.splitter_2.setObjectName(\"splitter_2\")\n self.SingleButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n 
font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.SingleButton.setFont(font)\n self.SingleButton.setObjectName(\"SingleButton\")\n self.LoopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LoopButton.setFont(font)\n self.LoopButton.setObjectName(\"LoopButton\")\n self.StopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.StopButton.setFont(font)\n self.StopButton.setObjectName(\"StopButton\")\n self.tableWidget = QtWidgets.QTableWidget(Form)\n self.tableWidget.setGeometry(QtCore.QRect(20, 10, 611, 75))\n self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 75))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget.setFont(font)\n self.tableWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tableWidget.setStyleSheet(\"QTableWidget{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:#444444;\\n\"\n\"border:1px solid #242424;\\n\"\n\"alternate-background-color:#525252;\\n\"\n\"gridline-color:#242424;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:selected{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #484848,stop:1 #383838);\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:hover{\\n\"\n\"background:#5B5B5B;\\n\"\n\"}\\n\"\n\"QHeaderView::section{\\n\"\n\"text-align:center;\\n\"\n\"background:#5E5E5E;\\n\"\n\"padding:3px;\\n\"\n\"margin:0px;\\n\"\n\"color:#DCDCDC;\\n\"\n\"border:1px solid #242424;\\n\"\n\"border-left-width:0;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar:vertical{\\n\"\n\"background:#484848;\\n\"\n\"padding:0px;\\n\"\n\"border-radius:6px;\\n\"\n\"max-width:12px;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::handle:vertical{\\n\"\n\"background:#CCCCCC;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::handle:hover:vertical,QScrollBar::handle:pressed:vertical{\\n\"\n\"background:#A7A7A7;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-page:vertical{\\n\"\n\"background:444444;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::add-page:vertical{\\n\"\n\"background:5B5B5B;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::add-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\")\n self.tableWidget.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.tableWidget.setMidLineWidth(-1)\n self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setAutoScroll(False)\n self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tableWidget.setTextElideMode(QtCore.Qt.ElideNone)\n self.tableWidget.setObjectName(\"tableWidget\")\n self.tableWidget.setColumnCount(15)\n self.tableWidget.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tableWidget.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n 
self.tableWidget.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(5, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(6, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(7, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(8, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(9, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(10, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(11, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(12, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(13, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(14, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 0, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 1, item)\n item = QtWidgets.QTableWidgetItem()\n 
item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 2, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 3, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 4, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 5, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 6, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 7, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 8, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 9, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 10, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 11, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 12, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 13, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 14, item)\n self.tableWidget.horizontalHeader().setVisible(True)\n self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(41)\n self.tableWidget.horizontalHeader().setStretchLastSection(True)\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.verticalHeader().setCascadingSectionResizes(False)\n self.tableWidget.verticalHeader().setDefaultSectionSize(40)\n self.tableWidget.verticalHeader().setHighlightSections(True)\n self.tableWidget.verticalHeader().setMinimumSectionSize(40)\n self.tableWidget.verticalHeader().setSortIndicatorShown(False)\n self.RPlayedCard = QtWidgets.QLabel(Form)\n self.RPlayedCard.setGeometry(QtCore.QRect(490, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.RPlayedCard.setFont(font)\n self.RPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.RPlayedCard.setObjectName(\"RPlayedCard\")\n self.PredictedCard = QtWidgets.QLabel(Form)\n self.PredictedCard.setGeometry(QtCore.QRect(320, 190, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PredictedCard.setFont(font)\n self.PredictedCard.setStyleSheet(\"\")\n self.PredictedCard.setFrameShape(QtWidgets.QFrame.Panel)\n self.PredictedCard.setLineWidth(1)\n self.PredictedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.PredictedCard.setObjectName(\"PredictedCard\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Hi\"))\n self.WinRate.setText(_translate(\"Form\", \"评分\"))\n self.UserHandCards.setText(_translate(\"Form\", \"手牌\"))\n self.ThreeLandlordCards.setText(_translate(\"Form\", \"地主牌\"))\n 
self.BidWinrate.setText(_translate(\"Form\", \"叫牌胜率:\"))\n self.PreWinrate.setText(_translate(\"Form\", \"局前胜率:\"))\n self.label.setText(_translate(\"Form\", \"游戏状态\"))\n self.LPlayedCard.setText(_translate(\"Form\", \"上家出牌区域\"))\n self.SingleButton.setText(_translate(\"Form\", \"单局\"))\n self.LoopButton.setText(_translate(\"Form\", \" 连续\"))\n self.StopButton.setText(_translate(\"Form\", \"停止\"))\n item = self.tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"Form\", \"大\"))\n item = self.tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"Form\", \"小\"))\n item = self.tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"Form\", \"2\"))\n item = self.tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"Form\", \"A\"))\n item = self.tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"Form\", \"K\"))\n item = self.tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"Form\", \"Q\"))\n item = self.tableWidget.horizontalHeaderItem(6)\n item.setText(_translate(\"Form\", \"J\"))\n item = self.tableWidget.horizontalHeaderItem(7)\n item.setText(_translate(\"Form\", \"10\"))\n item = self.tableWidget.horizontalHeaderItem(8)\n item.setText(_translate(\"Form\", \"9\"))\n item = self.tableWidget.horizontalHeaderItem(9)\n item.setText(_translate(\"Form\", \"8\"))\n item = self.tableWidget.horizontalHeaderItem(10)\n item.setText(_translate(\"Form\", \"7\"))\n item = self.tableWidget.horizontalHeaderItem(11)\n item.setText(_translate(\"Form\", \"6\"))\n item = self.tableWidget.horizontalHeaderItem(12)\n item.setText(_translate(\"Form\", \"5\"))\n item = self.tableWidget.horizontalHeaderItem(13)\n item.setText(_translate(\"Form\", \"4\"))\n item = self.tableWidget.horizontalHeaderItem(14)\n item.setText(_translate(\"Form\", \"3\"))\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n item = self.tableWidget.item(0, 0)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 1)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 2)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 3)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 4)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 5)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 6)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 7)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 8)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 9)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 10)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 11)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 12)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 13)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 14)\n item.setText(_translate(\"Form\", \"0\"))\n self.tableWidget.setSortingEnabled(__sortingEnabled)\n self.RPlayedCard.setText(_translate(\"Form\", \"下家出牌区域\"))\n self.PredictedCard.setText(_translate(\"Form\", \"AI出牌区域\"))" }, { "identifier": "GameEnv", "path": "douzero/env/game.py", "snippet": "class GameEnv(object):\n\n def __init__(self, players):\n\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n 
self.acting_player_position = None\n self.player_utility_dict = None\n\n self.players = players\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.num_wins = {'landlord': 0,\n 'farmer': 0}\n\n self.num_scores = {'landlord': 0,\n 'farmer': 0}\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 1,\n 'landlord_up': 1,\n 'landlord_down': 1}\n self.step_count = 0\n\n\n def card_play_init(self, card_play_data):\n self.info_sets['landlord'].player_hand_cards = \\\n card_play_data['landlord']\n self.info_sets['landlord_up'].player_hand_cards = \\\n card_play_data['landlord_up']\n self.info_sets['landlord_down'].player_hand_cards = \\\n card_play_data['landlord_down']\n self.three_landlord_cards = card_play_data['three_landlord_cards']\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n\n\n def game_done(self):\n if len(self.info_sets['landlord'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_up'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_down'].player_hand_cards) == 0:\n # if one of the three players discards his hand,\n # then game is over.\n self.compute_player_utility()\n self.update_num_wins_scores()\n\n self.game_over = True\n\n def compute_player_utility(self):\n\n if len(self.info_sets['landlord'].player_hand_cards) == 0:\n self.player_utility_dict = {'landlord': 2,\n 'farmer': -1}\n else:\n self.player_utility_dict = {'landlord': -2,\n 'farmer': 1}\n\n def update_num_wins_scores(self):\n for pos, utility in self.player_utility_dict.items():\n base_score = 2 if pos == 'landlord' else 1\n if utility > 0:\n self.num_wins[pos] += 1\n self.winner = pos\n self.num_scores[pos] += base_score * (2 ** self.bomb_num)\n else:\n self.num_scores[pos] -= base_score * (2 ** self.bomb_num)\n\n def get_winner(self):\n return self.winner\n\n def get_bomb_num(self):\n return self.bomb_num\n\n def step(self, position, action=[]):\n win_rate = 0\n if self.acting_player_position == position:\n action, actions_confidence = self.players[1].act(self.game_infoset)\n # 计算胜率\n win_rate = actions_confidence\n # win_rate = max(actions_confidence, -1)\n # win_rate = min(win_rate, 1)\n # win_rate = str(round(float((win_rate + 1) / 2), 4))\n\n if len(action) > 0:\n self.last_pid = self.acting_player_position\n\n if action in bombs:\n self.bomb_num += 1\n\n self.last_move_dict[\n self.acting_player_position] = action.copy()\n\n self.card_play_action_seq.append((position, action))\n self.update_acting_player_hand_cards(action)\n\n self.played_cards[self.acting_player_position] += action\n\n if self.acting_player_position == 'landlord' and \\\n len(action) > 0 and \\\n len(self.three_landlord_cards) > 0:\n for card in action:\n if len(self.three_landlord_cards) > 0:\n if card in self.three_landlord_cards:\n self.three_landlord_cards.remove(card)\n else:\n break\n self.game_done()\n if not self.game_over:\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n # 返回动作和胜率,只有玩家角色会接受返回值\n action_message = {\"action\": str(''.join([EnvCard2RealCard[c] for c in action])),\n \"win_rate\": 
str(round(float(win_rate), 4))}\n return action_message\n\n def get_last_move(self):\n last_move = []\n if len(self.card_play_action_seq) != 0:\n if len(self.card_play_action_seq[-1][1]) == 0:\n last_move = self.card_play_action_seq[-2][1]\n else:\n last_move = self.card_play_action_seq[-1][1]\n\n return last_move\n\n def get_last_two_moves(self):\n last_two_moves = [[], []]\n for card in self.card_play_action_seq[-2:]:\n last_two_moves.insert(0, card[1])\n last_two_moves = last_two_moves[:2]\n return last_two_moves\n\n def get_acting_player_position(self):\n if self.acting_player_position is None:\n self.acting_player_position = 'landlord'\n\n else:\n if self.acting_player_position == 'landlord':\n self.acting_player_position = 'landlord_down'\n\n elif self.acting_player_position == 'landlord_down':\n self.acting_player_position = 'landlord_up'\n\n else:\n self.acting_player_position = 'landlord'\n\n return self.acting_player_position\n\n def update_acting_player_hand_cards(self, action):\n if action != []:\n # 更新玩家手牌,删除对应的牌\n if self.acting_player_position == self.players[0]:\n for card in action:\n self.info_sets[self.acting_player_position].player_hand_cards.remove(card)\n # 更新另外两个玩家手牌,删除相同数量的牌\n else:\n del self.info_sets[self.acting_player_position].player_hand_cards[0:len(action)]\n self.info_sets[self.acting_player_position].player_hand_cards.sort()\n\n def get_legal_card_play_actions(self):\n mg = MovesGener(\n self.info_sets[self.acting_player_position].player_hand_cards)\n\n action_sequence = self.card_play_action_seq\n\n rival_move = []\n if len(action_sequence) != 0:\n if len(action_sequence[-1][1]) == 0:\n rival_move = action_sequence[-2][1]\n else:\n rival_move = action_sequence[-1][1]\n\n rival_type = md.get_move_type(rival_move)\n rival_move_type = rival_type['type']\n rival_move_len = rival_type.get('len', 1)\n moves = list()\n\n if rival_move_type == md.TYPE_0_PASS:\n moves = mg.gen_moves()\n\n elif rival_move_type == md.TYPE_1_SINGLE:\n all_moves = mg.gen_type_1_single()\n moves = ms.filter_type_1_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_2_PAIR:\n all_moves = mg.gen_type_2_pair()\n moves = ms.filter_type_2_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_3_TRIPLE:\n all_moves = mg.gen_type_3_triple()\n moves = ms.filter_type_3_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_4_BOMB:\n all_moves = mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n moves = ms.filter_type_4_bomb(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_5_KING_BOMB:\n moves = []\n\n elif rival_move_type == md.TYPE_6_3_1:\n all_moves = mg.gen_type_6_3_1()\n moves = ms.filter_type_6_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_7_3_2:\n all_moves = mg.gen_type_7_3_2()\n moves = ms.filter_type_7_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_8_SERIAL_SINGLE:\n all_moves = mg.gen_type_8_serial_single(repeat_num=rival_move_len)\n moves = ms.filter_type_8_serial_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_9_SERIAL_PAIR:\n all_moves = mg.gen_type_9_serial_pair(repeat_num=rival_move_len)\n moves = ms.filter_type_9_serial_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_10_SERIAL_TRIPLE:\n all_moves = mg.gen_type_10_serial_triple(repeat_num=rival_move_len)\n moves = ms.filter_type_10_serial_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_11_SERIAL_3_1:\n all_moves = mg.gen_type_11_serial_3_1(repeat_num=rival_move_len)\n moves = 
ms.filter_type_11_serial_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_12_SERIAL_3_2:\n all_moves = mg.gen_type_12_serial_3_2(repeat_num=rival_move_len)\n moves = ms.filter_type_12_serial_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_13_4_2:\n all_moves = mg.gen_type_13_4_2()\n moves = ms.filter_type_13_4_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_14_4_22:\n all_moves = mg.gen_type_14_4_22()\n moves = ms.filter_type_14_4_22(all_moves, rival_move)\n\n if rival_move_type not in [md.TYPE_0_PASS,\n md.TYPE_4_BOMB, md.TYPE_5_KING_BOMB]:\n moves = moves + mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n\n if len(rival_move) != 0: # rival_move is not 'pass'\n moves = moves + [[]]\n\n for m in moves:\n m.sort()\n\n return moves\n\n def reset(self):\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n self.acting_player_position = None\n self.player_utility_dict = None\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 0,\n 'landlord_up': 0,\n 'landlord_down': 0}\n self.step_count = 0\n\n def get_infoset(self):\n self.info_sets[\n self.acting_player_position].last_pid = self.last_pid\n\n self.info_sets[\n self.acting_player_position].legal_actions = \\\n self.get_legal_card_play_actions()\n\n self.info_sets[\n self.acting_player_position].bomb_num = self.bomb_num\n\n self.info_sets[\n self.acting_player_position].last_move = self.get_last_move()\n\n self.info_sets[\n self.acting_player_position].last_two_moves = self.get_last_two_moves()\n\n self.info_sets[\n self.acting_player_position].last_move_dict = self.last_move_dict\n\n self.info_sets[self.acting_player_position].num_cards_left_dict = \\\n {pos: len(self.info_sets[pos].player_hand_cards)\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n self.info_sets[self.acting_player_position].other_hand_cards = []\n\n '''\n 调整计算其他人手牌的方法,整副牌减去玩家手牌与出过的牌\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n if pos != self.acting_player_position:\n self.info_sets[\n self.acting_player_position].other_hand_cards += \\\n self.info_sets[pos].player_hand_cards\n '''\n # 把出过的牌中三个子列表合成一个列表\n played_cards_tmp = []\n for i in list(self.played_cards.values()):\n played_cards_tmp.extend(i)\n # 出过的牌和玩家手上的牌\n played_and_hand_cards = played_cards_tmp + self.info_sets[self.acting_player_position].player_hand_cards\n # 整副牌减去出过的牌和玩家手上的牌,就是其他人的手牌\n for i in set(AllEnvCard):\n self.info_sets[\n self.acting_player_position].other_hand_cards.extend([i] * (AllEnvCard.count(i) - played_and_hand_cards.count(i)))\n\n self.info_sets[self.acting_player_position].played_cards = \\\n self.played_cards\n self.info_sets[self.acting_player_position].three_landlord_cards = \\\n self.three_landlord_cards\n self.info_sets[self.acting_player_position].card_play_action_seq = \\\n self.card_play_action_seq\n\n self.info_sets[\n self.acting_player_position].all_handcards = \\\n {pos: self.info_sets[pos].player_hand_cards\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n # Custom bid 
info\n self.info_sets[self.acting_player_position].bid_info = bid_infos[self.acting_player_position]\n\n return deepcopy(self.info_sets[self.acting_player_position])" }, { "identifier": "DeepAgent", "path": "douzero/evaluation/deep_agent.py", "snippet": "class DeepAgent:\n\n def __init__(self, position, model_path):\n self.model_type = \"old\"\n if \"general\" in model_path:\n self.model_type = \"general\"\n elif \"resnet\" in model_path:\n self.model_type = \"resnet\"\n self.model = _load_model(position, model_path, self.model_type)\n\n def act(self, infoset):\n obs = get_obs(infoset, model_type=self.model_type)\n z_batch = torch.from_numpy(obs['z_batch']).float()\n x_batch = torch.from_numpy(obs['x_batch']).float()\n if torch.cuda.is_available():\n z_batch, x_batch = z_batch.cuda(), x_batch.cuda()\n y_pred = self.model.forward(z_batch, x_batch, return_value=True)['values']\n y_pred = y_pred.detach().cpu().numpy()\n\n best_action_index = np.argmax(y_pred, axis=0)[0]\n best_action = infoset.legal_actions[best_action_index]\n best_action_confidence = y_pred[best_action_index]\n return best_action, best_action_confidence" } ]
import GameHelper as gh import os import sys import time import threading import pyautogui import win32gui import multiprocessing as mp import DetermineColor as DC import cv2 import numpy as np import traceback import BidModel import LandlordModel import FarmerModel from GameHelper import GameHelper from PIL import Image from skimage.metrics import structural_similarity as ssim from collections import defaultdict from douzero.env.move_detector import get_move_type from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox from PyQt5.QtGui import QPixmap, QIcon from PyQt5.QtCore import QTime, QEventLoop, Qt from MainWindow import Ui_Form from douzero.env.game import GameEnv from douzero.evaluation.deep_agent import DeepAgent
12150
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper()
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper()
class MyPyQT_Form(QtWidgets.QWidget, Ui_Form):
2
2023-12-01 04:04:30+00:00
16k
yongzhuo/MacroGPT-Pretrain
macro_gpt/ft_gpt/train.pt.speed.py
[ { "identifier": "CUDA_VISIBLE_DEVICES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CUDA_VISIBLE_DEVICES = \"0\"" }, { "identifier": "USE_TORCH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_TORCH = \"1\"" }, { "identifier": "CPU_NUMS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CPU_NUMS = \"9\"" }, { "identifier": "LlamaForCausalLM", "path": "macro_gpt/models/llama/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n # logits = self.lm_head(hidden_states)\n logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype))\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values is not None:\n past_length = past_key_values[0][0].shape[2]\n\n # Some generation methods already pass only the last input ID\n if input_ids.shape[1] > past_length:\n remove_prefix_length = past_length\n else:\n # Default to old behavior: keep only final ID\n remove_prefix_length = input_ids.shape[1] - 1\n\n input_ids = input_ids[:, remove_prefix_length:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, 
beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "LlamaTokenizer", "path": "macro_gpt/models/llama/tokenization_llama.py", "snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<unk>\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n pad_token (`str` or `tokenizers.AddedToken`, *optional*):\n A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by\n attention mechanisms or loss computation.\n sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):\n Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for\n SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,\n to set:\n\n - `enable_sampling`: Enable subword regularization.\n - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.\n\n - `nbest_size = {0,1}`: No sampling is performed.\n - `nbest_size > 1`: samples from the nbest_size results.\n - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)\n using forward-filtering-and-backward-sampling algorithm.\n\n - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for\n BPE-dropout.\n\n add_bos_token (`bool`, *optional*, defaults to `True`):\n Whether or not to add an `bos_token` at the start of sequences.\n add_eos_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add an `eos_token` at the end of sequences.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like\n extra spaces.\n use_default_system_prompt (`bool`, *optional*, defaults to `True`):\n Whether or not the default system prompt for Llama should be used.\n spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to add spaces between special tokens.\n legacy (`bool`, *optional*):\n Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622\n and #25224 which includes fixes to properly handle tokens that appear after special tokens. 
A simple\n example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n use_default_system_prompt=True,\n spaces_between_special_tokens=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This is\"\n \" expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you.\"\n \" If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it\"\n \" means, and thouroughly read the reason why this was added as explained in\"\n \" https://github.com/huggingface/transformers/pull/24565\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.use_default_system_prompt = use_default_system_prompt\n self.sp_model = self.get_spm_processor(kwargs.pop(\"from_slow\", False))\n\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n use_default_system_prompt=use_default_system_prompt,\n spaces_between_special_tokens=spaces_between_special_tokens,\n legacy=legacy,\n **kwargs,\n )\n\n @property\n def unk_token_length(self):\n return len(self.sp_model.encode(str(self.unk_token)))\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor\n def get_spm_processor(self, from_slow=False):\n tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n if self.legacy or from_slow: # no dependency on protobuf\n tokenizer.Load(self.vocab_file)\n return tokenizer\n\n with open(self.vocab_file, \"rb\") as f:\n sp_model = f.read()\n model_pb2 = import_protobuf(f\"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)\")\n model = model_pb2.ModelProto.FromString(sp_model)\n normalizer_spec = model_pb2.NormalizerSpec()\n normalizer_spec.add_dummy_prefix = False\n model.normalizer_spec.MergeFrom(normalizer_spec)\n sp_model = model.SerializeToString()\n tokenizer.LoadFromSerializedProto(sp_model)\n return tokenizer\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", add_special_tokens=False, **kwargs) -> List[str]:\n \"\"\"\n Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the\n first token is special.\n \"\"\"\n if self.legacy or len(text) == 0:\n return super().tokenize(text, **kwargs)\n\n tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \"), **kwargs)\n\n if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:\n tokens = tokens[1:]\n return tokens\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any\n SPIECE_UNDERLINE. For example: `self.sp_model.encode(f\"{SPIECE_UNDERLINE}Hey\", out_type = str)` will give\n `['H', 'e', 'y']` instead of `['▁He', 'y']`. 
Thus we always encode `f\"{unk_token}text\"` and strip the\n `unk_token`. Here is an example with `unk_token = \"<unk>\"` and `unk_token_length = 4`.\n `self.tokenizer.sp_model.encode(\"<unk> Hey\", out_type = str)[4:]`.\n \"\"\"\n tokens = self.sp_model.encode(text, out_type=str)\n if self.legacy or not text.startswith((SPIECE_UNDERLINE, \" \")):\n return tokens\n\n # 1. Encode string + prefix ex: \"<unk> Hey\"\n tokens = self.sp_model.encode(self.unk_token + text, out_type=str)\n # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']\n return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n # since we manually add the prefix space, we have to remove it when decoding\n if tokens[0].startswith(SPIECE_UNDERLINE):\n tokens[0] = tokens[0][1:]\n\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0 and self.legacy:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n @property\n def default_chat_template(self):\n \"\"\"\n LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.\n Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict\n user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering\n rather than needing special tokens. The system message is partly 'embedded' in the first user message, which\n results in an unusual token ordering when it is present. 
This template should definitely be changed if you wish\n to fine-tune a model with more flexible role ordering!\n\n The output should look something like:\n\n <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos> <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n \"\"\"\n\n template = (\n \"{% if messages[0]['role'] == 'system' %}\"\n \"{% set loop_messages = messages[1:] %}\" # Extract system message if it's present\n \"{% set system_message = messages[0]['content'] %}\"\n \"{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}\"\n \"{% set loop_messages = messages %}\" # Or use the default system message if the flag is set\n \"{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}\"\n \"{% else %}\"\n \"{% set loop_messages = messages %}\"\n \"{% set system_message = false %}\"\n \"{% endif %}\"\n \"{% for message in loop_messages %}\" # Loop over all non-system messages\n \"{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\"\n \"{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\"\n \"{% endif %}\"\n \"{% if loop.index0 == 0 and system_message != false %}\" # Embed system message in first message\n \"{% set content = '<<SYS>>\\\\n' + system_message + '\\\\n<</SYS>>\\\\n\\\\n' + message['content'] %}\"\n \"{% else %}\"\n \"{% set content = message['content'] %}\"\n \"{% endif %}\"\n \"{% if message['role'] == 'user' %}\" # After all of that, handle messages/roles in a fairly normal way\n \"{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}\"\n \"{% elif message['role'] == 'system' %}\"\n \"{{ '<<SYS>>\\\\n' + content.strip() + '\\\\n<</SYS>>\\\\n\\\\n' }}\"\n \"{% elif message['role'] == 'assistant' %}\"\n \"{{ ' ' + content.strip() + ' ' + eos_token }}\"\n \"{% endif %}\"\n \"{% endfor %}\"\n )\n template = template.replace(\"USE_DEFAULT_PROMPT\", \"true\" if self.use_default_system_prompt else \"false\")\n default_message = DEFAULT_SYSTEM_PROMPT.replace(\"\\n\", \"\\\\n\").replace(\"'\", \"\\\\'\")\n template = template.replace(\"DEFAULT_SYSTEM_MESSAGE\", default_message)\n\n return template" }, { "identifier": "LlamaConfig", "path": "macro_gpt/models/llama/modeling_llama.py", "snippet": "def is_flash_attn_available():\n def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:\ndef _get_unpad_data(padding_mask):\ndef _make_causal_mask(\n input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0\n):\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n def __init__(self, hidden_size, eps=1e-6):\n def forward(self, hidden_states):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def forward(self, x, seq_len=None):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\ndef rotate_half(x):\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids):\n def __init__(self, config):\n def forward(self, x):\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n def __init__(self, config: LlamaConfig):\n def _init_rope(self):\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: 
int):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def _flash_attention_forward(\n self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None\n ):\n def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length):\n def __init__(self, config: LlamaConfig):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n def _init_weights(self, module):\n def _set_gradient_checkpointing(self, module, value=False):\n def __init__(self, config: LlamaConfig):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def set_decoder(self, decoder):\n def get_decoder(self):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n def _reorder_cache(past_key_values, beam_idx):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: 
Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n_CONFIG_FOR_DOC = \"LlamaConfig\"\nLLAMA_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`LlamaConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nLLAMA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\nclass LlamaRMSNorm(nn.Module):\nclass LlamaRotaryEmbedding(nn.Module):\nclass LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaMLP(nn.Module):\nclass LlamaAttention(nn.Module):\nclass LlamaFlashAttention2(LlamaAttention):\nclass LlamaDecoderLayer(nn.Module):\nclass LlamaPreTrainedModel(PreTrainedModel):\nclass LlamaModel(LlamaPreTrainedModel):\nclass LlamaForCausalLM(LlamaPreTrainedModel):\nclass LlamaForSequenceClassification(LlamaPreTrainedModel):" }, { "identifier": "PATH_MODEL_PRETRAIN", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_MODEL_PRETRAIN = \"\"" }, { "identifier": "DATA_PATH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "DATA_PATH = \"../datasets/tigerbot-train-00001-of-00097.json\"" }, { "identifier": "MODEL_SAVE_DIR", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MODEL_SAVE_DIR = \"model_macrogpt_1b3_float32\"" }, { "identifier": "REPO_ID", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "REPO_ID = \"Macropodus/macrogpt-tokenizer\"" }, { "identifier": "MICRO_BATCH_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MICRO_BATCH_SIZE = 4 # default=4 # this could actually be 5 but i like powers of 2" }, { "identifier": "BATCH_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "BATCH_SIZE = 128" }, { "identifier": "GRADIENT_ACCUMULATION_STEPS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE" }, { "identifier": "LEARNING_RATE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LEARNING_RATE = 3e-4 # default=3e-4 # the Karpathy constant" }, { "identifier": "EPOCHS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "EPOCHS = 1 # default=3 # we don't always need 3 tbh" }, { "identifier": "SAVE_STEPS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "SAVE_STEPS = 384" }, { "identifier": "VAL_SET_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "VAL_SET_SIZE = 0" }, { "identifier": "TARGET_MODULES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "TARGET_MODULES = [\"query_key_value\"]" }, { "identifier": "IS_PARALLELIZABLE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "IS_PARALLELIZABLE = False" }, { "identifier": "MODEL_PARALLEL", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MODEL_PARALLEL = False" }, { "identifier": "USE_CACHE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_CACHE = False" }, { "identifier": "MAX_LENGTH_Q", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_Q = 1024 - 2 # default=128 - 2" }, { "identifier": "MAX_LENGTH_A", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_A = 1024 - 2 # default=128 - 2" }, { "identifier": "MAX_LENGTH_QA", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_QA = MAX_LENGTH_Q + MAX_LENGTH_A + 4" }, { "identifier": "LORA_DROPOUT", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_DROPOUT = 0.05" }, { "identifier": 
"LORA_ALPHA", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_ALPHA = 16" }, { "identifier": "LORA_R", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_R = 8" }, { "identifier": "PATH_MODEL_CONFIG", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_MODEL_CONFIG = \"config_macrogpt_1b3_float32.json\" or MODEL_SAVE_DIR" }, { "identifier": "PATH_TOKENIZER_PRETRAIN", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_TOKENIZER_PRETRAIN = REPO_ID or \"./macrogpt.model\"" } ]
import random
import copy
import sys
import os
import torch.distributed as dist
import bitsandbytes as bnb
import torch.nn as nn
import transformers
import torch
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS  # from config
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig)
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.modeling_utils import unwrap_model
from tensorboardX import SummaryWriter
from datasets import load_dataset
from macro_gpt.models.llama.modeling_llama import LlamaForCausalLM as LLMForCausalLM
from macro_gpt.models.llama.tokenization_llama import LlamaTokenizer as LLMTokenizer
from macro_gpt.models.llama.modeling_llama import LlamaConfig as LLMConfig
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LORA_DROPOUT, LORA_ALPHA, LORA_R
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_CONFIG, PATH_TOKENIZER_PRETRAIN
13834
# ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 50256 ID_SOP = 50256 ID_BOS = 50256 ID_EOS = 50256 ID_PAD = 50256 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } # model = GPT2LMHeadModel.from_pretrained(PATH_MODEL_PRETRAIN) gpt2_config = LLMConfig.from_json_file(PATH_MODEL_CONFIG) model = LLMForCausalLM(gpt2_config) model.init_weights() model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL model.config.use_cache = USE_CACHE model = model.cuda() print_rank_0_named_parameters(model) tensorboardx_witer = SummaryWriter(logdir=MODEL_SAVE_DIR) # files = dfs_file(DATA_PATH) # files = [files for file in files if "data_merge.0" in file or "data_merge.1" in file] ### 只有一个train的情况 # data = load_dataset("json", data_files={"train": files}) data = load_dataset("json", data_files=DATA_PATH) # data = load_dataset("json", data_dir=DATA_PATH) # if VAL_SET_SIZE > 0: # # train_val = data["train"].train_test_split(test_size=min(VAL_SET_SIZE, # # int(len(data["train"])/10000)), shuffle=True, seed=42) # VAL_SET_SIZE = max(min(VAL_SET_SIZE, int(len(data["train"])/10000)), 1) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=VAL_SET_SIZE, shuffle=True, seed=42) # train_data = train_val["train"].shuffle().map(generate_prompt) # val_data = train_val["test"].shuffle().map(generate_prompt) # else: generate_prompt(data["train"][0], is_logger=True) train_data = data["train"].shuffle().map(generate_prompt) val_data = None class CustomTrainer(transformers.Trainer): def compute_loss(self, model, inputs, return_outputs=False): inputs = {k: v.cuda() for k, v in inputs.items()} outputs = model(**inputs) # if contain labels, will calculate loss if local_rank_is_0: logs = {} tr_loss_scalar = self._nested_gather(outputs.loss.detach()).mean().item() logs["loss"] = round(tr_loss_scalar, 4) logs["lr"] = self.lr_scheduler.get_last_lr()[0] step = self.state.global_step for k, v in logs.items(): tensorboardx_witer.add_scalar(k, v, step) self.log(logs) if self.label_smoother is not None and "labels" in inputs: labels = inputs.pop("labels") else: labels = None # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. 
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] return (loss, outputs) if return_outputs else loss trainer = CustomTrainer( # data_collator=transformers.DataCollatorForSeq2Seq( # tokenizer, pad_to_multiple_of=8, # return_tensors="pt", padding=True # ), data_collator=data_collator, train_dataset=train_data, eval_dataset=val_data, model=model, args=transformers.TrainingArguments( gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS, per_device_train_batch_size=MICRO_BATCH_SIZE, learning_rate=LEARNING_RATE, num_train_epochs=EPOCHS, max_grad_norm=1.0, logging_steps=20, # warmup_steps=32, # warmup_steps=382, # 618 warmup_ratio=0.01, # warmup_steps=16, evaluation_strategy="no", lr_scheduler_type="constant", #'constant', # "cosine", logging_first_step=False, # evaluation_strategy="steps" if VAL_SET_SIZE > 0 else "no", # eval_steps=SAVE_STEPS if VAL_SET_SIZE > 0 else None, save_strategy="steps", save_total_limit=3,
# !/usr/bin/python # -*- coding: utf-8 -*- # @time : 2023/3/5 21:04 # @author : Mo # @function: macro-gpt path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) sys.path.append(path_root) os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072" # os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES os.environ["USE_TORCH"] = USE_TORCH os.environ["OMP_NUM_THREADS"] = CPU_NUMS # export OMP_NUM_THREADS=1 os.environ["OPENBLAS_NUM_THREADS"] = CPU_NUMS # export OPENBLAS_NUM_THREADS=1 os.environ["MKL_NUM_THREADS"] = CPU_NUMS # export MKL_NUM_THREADS=1 os.environ["VECLIB_MAXIMUM_THREADS"] = CPU_NUMS # export VECLIB_MAXIMUM_THREADS=1 os.environ["NUMEXPR_NUM_THREADS"] = CPU_NUMS # export NUMEXPR_NUM_THREADS=1 def save_model_state(model, config=None, model_save_dir="./", model_name="adapter_model.bin"): """ 仅保存 有梯度 的 模型参数(推荐使用) """ if not os.path.exists(model_save_dir): os.makedirs(model_save_dir) # save config if config: config.save_pretrained(model_save_dir) # config.to_dict() # save model path_model = os.path.join(model_save_dir, model_name) # grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters() # if v.requires_grad == True} grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters()} torch.save(grad_params_dict, path_model) print_rank_0("******model_save_path is {}******".format(path_model)) def print_rank_0_named_parameters(model, use_print_rank_0_data=False): """ 打印模型训练参数/数据类型信息 """ trainable_params = 0 all_param = 0 for name, param in model.named_parameters(): if use_print_rank_0_data: print_rank_0((name, param.data.dtype, param.requires_grad, param.data)) else: print_rank_0((name, param.data.dtype, param.requires_grad)) num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel all_param += num_params if param.requires_grad: trainable_params += num_params print_rank_0(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}") def prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", use_gradient_checkpointing=True, layer_norm_names=["layer_norm"]): r""" This method wrapps the entire protocol for preparing a model before running a training. 
This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers` """ # 不要使用 model.half(), 这样会先截取精度再训练了, 最初data就要保持half for name, param in model.named_parameters(): # freeze base model's layers param.requires_grad = False # cast layer norm in fp32 for stability for 8bit models if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names): param.data = param.data.to(torch.float32) elif output_embedding_layer_name in name: # lm_head也需要是tf.float32(最后一层) param.data = param.data.to(torch.float32) else: param.data = param.data.to(torch.half) if use_gradient_checkpointing: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable() return model def generate_prompt(data_point, is_logger=False): # sorry about the formatting disaster gotta move fast # text_1 = f"指令:\n{data_point.get('instruction', '')}\n问:\n{data_point.get('input', '')}\n答:\n" \ # if data_point.get('input', '') else f"指令:\n{data_point.get('instruction', '')}\n答:\n" # text_2 = f"{data_point.get('output', '')}" text_a = data_point.get('a', '') prompt_str_1 = text_a # end with gMASK, <sop> x = tokenizer.encode(prompt_str_1) if len(x) > MAX_LENGTH_QA - 2: x = x[:MAX_LENGTH_QA - 2] if not x: x = [ID_PAD, ID_EOS] if x and x[-1] != ID_EOS: x += [ID_EOS] out = {"input_ids": x, "labels": []} if is_logger: print_rank_0(prompt_str_1) print_rank_0(out) return out def data_collator(batch): def get_position_ids(seq, bos_token_id): seq_length = len(seq) position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0) return position_ids def get_masks(seq, special_ids=IDS_ORG): """ padding-mask """ # mask until ID_SOP attention_mask = torch.ones((1, len(seq), len(seq))) attention_mask.tril_() # ### 如果 padding-right, 也mask掉 # for idx, s in enumerate(seq): # if s in special_ids: # attention_mask[..., idx] = 1 attention_mask = (attention_mask < 0.5).bool() return attention_mask len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1 for i in range(len(batch))] len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch)) batch_attention_mask = [] batch_position_ids = [] batch_input_ids = [] batch_labels = [] for ba in batch: x, y = ba.get("input_ids"), ba.get("labels") len_padding = len_max_batch - len(x) - len(y) if tokenizer.padding_side and tokenizer.padding_side == "left": labels = [-100] * len_padding + x + y input_ids = [ID_PAD] * (len_padding) + x + y else: labels = x + y + [-100] * len_padding input_ids = x + y + [ID_PAD] * (len_padding) tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP) tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG) tensor_input_ids = torch.tensor(input_ids, dtype=torch.long) tensor_labels = torch.tensor(labels, dtype=torch.long) batch_attention_mask.append(tensor_attention_mask) batch_position_ids.append(tensor_position_ids) batch_input_ids.append(tensor_input_ids) batch_labels.append(tensor_labels) # print_rank_0(batch_attention_mask) batch_attention_mask = torch.stack(batch_attention_mask) batch_position_ids = torch.stack(batch_position_ids) batch_input_ids = 
torch.stack(batch_input_ids) batch_labels = torch.stack(batch_labels) input_dict = { # "full_attention_mask": copy.deepcopy(batch_attention_mask), # "attention_mask": batch_attention_mask, # "position_ids": batch_position_ids, "input_ids": batch_input_ids, "labels": batch_labels, } # print_rank_0(input_dict) return input_dict def dfs_file(path_dir): """ 递归获取某个目录下的所有文件(所有层, 包括子目录) Args: path_dir[String]:, path of dir, eg. "/home/data" Returns: data[List]: data of input, eg. ["2020_01_08.txt"] """ path_files = [] for root, dirs, files in os.walk(path_dir): # 分别代表根目录、文件夹、文件 for file in files: # 遍历文件 file_path = os.path.join(root, file) # 获取文件绝对路径 path_files.append(file_path) # 将文件路径添加进列表 files = list(set(path_files)) files.sort() # the same list return files def print_rank_0(*args): """ 只打印 0 号GPU的 """ if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。 print(*args) def local_rank_is_0(): """ 判断是哪台机子的 """ flag = False if torch.distributed.get_rank() == 0: flag = True return flag dist.init_process_group(backend="nccl") # torch.distributed.init_process_group() tokenizer = LLMTokenizer.from_pretrained(PATH_MODEL_PRETRAIN) # tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "left" # Allow batched inference # tokenizer.padding_side = "right" # Allow batched inference # ID_gMASK = 64790 # ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 50256 ID_SOP = 50256 ID_BOS = 50256 ID_EOS = 50256 ID_PAD = 50256 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } # model = GPT2LMHeadModel.from_pretrained(PATH_MODEL_PRETRAIN) gpt2_config = LLMConfig.from_json_file(PATH_MODEL_CONFIG) model = LLMForCausalLM(gpt2_config) model.init_weights() model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL model.config.use_cache = USE_CACHE model = model.cuda() print_rank_0_named_parameters(model) tensorboardx_witer = SummaryWriter(logdir=MODEL_SAVE_DIR) # files = dfs_file(DATA_PATH) # files = [files for file in files if "data_merge.0" in file or "data_merge.1" in file] ### 只有一个train的情况 # data = load_dataset("json", data_files={"train": files}) data = load_dataset("json", data_files=DATA_PATH) # data = load_dataset("json", data_dir=DATA_PATH) # if VAL_SET_SIZE > 0: # # train_val = data["train"].train_test_split(test_size=min(VAL_SET_SIZE, # # int(len(data["train"])/10000)), shuffle=True, seed=42) # VAL_SET_SIZE = max(min(VAL_SET_SIZE, int(len(data["train"])/10000)), 1) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=VAL_SET_SIZE, shuffle=True, seed=42) # train_data = train_val["train"].shuffle().map(generate_prompt) # val_data = train_val["test"].shuffle().map(generate_prompt) # else: generate_prompt(data["train"][0], is_logger=True) train_data = data["train"].shuffle().map(generate_prompt) val_data = None class CustomTrainer(transformers.Trainer): def compute_loss(self, model, inputs, return_outputs=False): inputs = {k: v.cuda() for k, v in inputs.items()} outputs = model(**inputs) # if contain labels, will calculate loss if local_rank_is_0: logs = {} tr_loss_scalar = self._nested_gather(outputs.loss.detach()).mean().item() logs["loss"] = round(tr_loss_scalar, 4) logs["lr"] = self.lr_scheduler.get_last_lr()[0] step = self.state.global_step for k, v in logs.items(): tensorboardx_witer.add_scalar(k, v, step) self.log(logs) if self.label_smoother 
is not None and "labels" in inputs: labels = inputs.pop("labels") else: labels = None # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] return (loss, outputs) if return_outputs else loss trainer = CustomTrainer( # data_collator=transformers.DataCollatorForSeq2Seq( # tokenizer, pad_to_multiple_of=8, # return_tensors="pt", padding=True # ), data_collator=data_collator, train_dataset=train_data, eval_dataset=val_data, model=model, args=transformers.TrainingArguments( gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS, per_device_train_batch_size=MICRO_BATCH_SIZE, learning_rate=LEARNING_RATE, num_train_epochs=EPOCHS, max_grad_norm=1.0, logging_steps=20, # warmup_steps=32, # warmup_steps=382, # 618 warmup_ratio=0.01, # warmup_steps=16, evaluation_strategy="no", lr_scheduler_type="constant", #'constant', # "cosine", logging_first_step=False, # evaluation_strategy="steps" if VAL_SET_SIZE > 0 else "no", # eval_steps=SAVE_STEPS if VAL_SET_SIZE > 0 else None, save_strategy="steps", save_total_limit=3,
save_steps=SAVE_STEPS,
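For this record, the single line above is the gold continuation: it belongs inside the transformers.TrainingArguments call that cropped_code stops in the middle of, right after save_total_limit=3. A small reconstruction for illustration only (only the last keyword arguments shown in the context are repeated; SAVE_STEPS is 384 in the config snippet quoted earlier):

SAVE_STEPS = 384  # value from the config snippet above
training_kwargs = dict(
    save_strategy="steps",
    save_total_limit=3,
    save_steps=SAVE_STEPS,  # <- next_line: the text the model is asked to produce
)
print(training_kwargs["save_steps"])  # 384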
15
2023-11-30 12:39:19+00:00
16k
owkin/fedeca
fedeca/tests/test_dp_end2end.py
[ { "identifier": "TorchDPFedAvgAlgo", "path": "fedeca/algorithms/torch_dp_fed_avg_algo.py", "snippet": "class TorchDPFedAvgAlgo(TorchFedAvgAlgo):\n \"\"\"To be inherited.\n\n Wraps the necessary operation so a torch model can be trained in the Federated\n Averaging strategy using DP.\n \"\"\"\n\n def __init__(\n self,\n model: torch.nn.Module,\n criterion: torch.nn.modules.loss._Loss,\n optimizer: torch.optim.Optimizer,\n dataset: torch.utils.data.Dataset,\n num_updates: int,\n batch_size: int,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n with_batch_norm_parameters: bool = False,\n seed: Optional[int] = None,\n use_gpu: bool = True,\n dp_target_epsilon: float = None,\n dp_target_delta: float = None,\n dp_max_grad_norm: float = None,\n num_rounds: int = None,\n *args,\n **kwargs,\n ):\n \"\"\"Instantiate a TorchDPFedAvgAlgo.\n\n Parameters\n ----------\n model : torch.nn.modules.module.Module\n A torch model.\n criterion : torch.nn.modules.loss._Loss\n A torch criterion (loss).\n optimizer : torch.optim.Optimizer\n A torch optimizer linked to the model.\n dataset : torch.utils.data.Dataset\n Refer to the doc of the parent class.\n This behavior can be changed by re-writing the `_local_train` or\n `predict` methods.\n num_updates : int\n The number of updates to perform. Note that here we do not use\n NpIndexGenerators.\n batch_size : int\n The batch-size to target in expectation (Poisson sampling).\n scheduler : torch.optim.lr_scheduler._LRScheduler, Optional\n A torch scheduler that will be called at every batch. If None, no\n scheduler will be used. Defaults to None.\n with_batch_norm_parameters : bool\n Whether to include the batch norm layer parameters in the federated\n average strategy. Defaults to False.\n seed : typing.Optional[int]\n Seed set at the algo initialization on each organization.\n Defaults to None.\n use_gpu : bool\n Whether to use the GPUs if they are available. Defaults to True.\n dp_target_epsilon : float\n The target epsilon for (epsilon, delta)-differential private guarantee.\n Defaults to None.\n dp_target_delta : float\n The target delta for (epsilon, delta)-differential private guarantee.\n Defaults to None.\n dp_max_grad_norm : float\n The maximum L2 norm of per-sample gradients; used to enforce\n differential privacy. Defaults to None.\n num_rounds : int\n The number of rounds used to train the algo. 
Although this is very\n peculiar for a substra Algorithm to need access to this quantity,\n Opacus needs the number of rounds and updates used to compute the\n total number of training steps in order to compute a noise level\n respecting user constraints.\n \"\"\"\n super().__init__(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n dataset=dataset,\n scheduler=scheduler,\n seed=seed,\n use_gpu=use_gpu,\n index_generator=None,\n *args,\n **kwargs,\n )\n self._with_batch_norm_parameters = with_batch_norm_parameters\n self.dp_target_delta = dp_target_delta\n self.dp_target_epsilon = dp_target_epsilon\n self.dp_max_grad_norm = dp_max_grad_norm\n self.num_rounds = num_rounds\n\n self._apply_dp = (\n (self.dp_target_epsilon is not None)\n and (self.dp_max_grad_norm is not None)\n and (self.dp_target_delta is not None)\n )\n\n if not (self._apply_dp):\n raise ValueError(\n \"Do not use this Algo without DP you risk running into batch\"\n \" sampling issues, instead use TorchFedAvgAlgo with NpIndexGenerator\"\n )\n if self.num_rounds is None:\n raise ValueError(\n \"if you want to perform DP-training you need to prespecify the\"\n \" number of rounds in advance.\"\n )\n self.num_updates = num_updates\n self.batch_size = batch_size\n\n self.num_total_steps = self.num_updates * self.num_rounds\n\n def _local_train(\n self,\n train_dataset: torch.utils.data.Dataset,\n ):\n \"\"\"Contain the local training loop.\n\n Train the model on ``num_updates`` minibatches for the torch dataset.\n\n Parameters\n ----------\n train_dataset : torch.utils.data.Dataset\n train_dataset build from the x and y returned by the opener.\n \"\"\"\n # Create torch dataloader it is important that it has a self.batch_size\n # batch size as len(train_data_loader) will be called by opacus\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=self.batch_size\n )\n if not hasattr(self, \"size_train_dataset\"):\n self.size_train_dataset = len(train_dataset)\n\n if not hasattr(\n self, \"accountant\"\n ): # if the attribute is not already there, need to instantiate the Engine\n # Important to use RDP to be able to use high epsilons\n # see https://github.com/pytorch/opacus/issues/604\n privacy_engine = PrivacyEngine(accountant=\"rdp\")\n\n if not hasattr(self, \"sample_rate\"):\n self.sample_rate = self.batch_size / len(train_dataset)\n else:\n assert np.allclose(\n self.sample_rate, self.batch_size / self.size_train_dataset\n ), \"The length of the dataset has changed\"\n\n # We will need it later\n self.noise_multiplier = get_noise_multiplier(\n target_epsilon=self.dp_target_epsilon,\n target_delta=self.dp_target_delta,\n sample_rate=self.sample_rate,\n steps=self.num_total_steps,\n accountant=privacy_engine.accountant.mechanism(),\n )\n\n (\n self._model,\n self._optimizer,\n train_data_loader,\n ) = privacy_engine.make_private(\n module=self._model,\n optimizer=self._optimizer,\n data_loader=train_data_loader,\n noise_multiplier=self.noise_multiplier,\n max_grad_norm=self.dp_max_grad_norm,\n poisson_sampling=True,\n )\n self.accountant = privacy_engine.accountant\n\n else:\n train_data_loader = DPDataLoader.from_data_loader(train_data_loader)\n\n for x_batch, y_batch in train_data_loader:\n x_batch = x_batch.to(self._device)\n y_batch = y_batch.to(self._device)\n # As batch-size is variable sometimes the batch is empty\n if x_batch.nelement() == 0:\n continue\n # Forward pass\n y_pred = self._model(x_batch)\n\n # Compute Loss\n loss = self._criterion(y_pred, y_batch)\n\n 
self._optimizer.zero_grad()\n loss.backward()\n\n self._optimizer.step()\n\n if self._scheduler is not None:\n self._scheduler.step()\n\n @remote_data\n def train(\n self,\n datasamples: Any,\n shared_state: Optional[FedAvgAveragedState] = None,\n ) -> FedAvgSharedState:\n \"\"\"Train method of the DP federated averaging strategy.\n\n This method is essentially the same as the regular federated average\n algorithm but without an index generator.\n\n Parameters\n ----------\n datasamples : typing.Any\n Input data returned by the ``get_data`` method from the opener.\n shared_state : FedAvgAveragedState, Optional\n Dictionary containing torch parameters that will be set to the model.\n Defaults to None.\n\n Returns\n -------\n FedAvgSharedState\n Weight update (delta between fine-tuned weights and previous weights).\n \"\"\"\n # Note that we don't simply inherit from the method from FedAvgTorchAlgo\n # because it assumes the existence of the NpIndexGenerator\n\n # Create torch dataset\n train_dataset = self._dataset(datasamples, is_inference=False)\n\n if shared_state is not None:\n # The shared states is the average of the model parameter updates\n # for all organizations\n # Hence we need to add it to the previous local state parameters\n parameter_updates = [\n torch.from_numpy(x).to(self._device)\n for x in shared_state.avg_parameters_update\n ]\n weight_manager.increment_parameters(\n model=self._model,\n updates=parameter_updates,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n old_parameters = weight_manager.get_parameters(\n model=self._model,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n # Train mode for torch model\n self._model.train()\n\n # Train the model\n self._local_train(train_dataset)\n\n self._model.eval()\n\n parameters_update = weight_manager.subtract_parameters(\n parameters=weight_manager.get_parameters(\n model=self._model,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n ),\n parameters_to_subtract=old_parameters,\n )\n\n # Re set to the previous state\n weight_manager.set_parameters(\n model=self._model,\n parameters=old_parameters,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n return FedAvgSharedState(\n n_samples=len(train_dataset),\n parameters_update=[p.cpu().detach().numpy() for p in parameters_update],\n )\n\n def _local_predict(\n self,\n predict_dataset: torch.utils.data.Dataset,\n predictions_path,\n return_predictions=False,\n ):\n \"\"\"Predict.\n\n Parameters\n ----------\n predict_dataset : torch.utils.data.Dataset\n Predict dataset built from the `x` returned by the opener.\n\n Important\n ---------\n The responsibility is on the user to save the computed predictions.\n Substrafl provides the `TorchAlgo._save_predictions` method for this\n purpose.\n The user can load those predictions from a metric file with the command:\n `y_pred = np.load(inputs['predictions'])`.\n\n Raises\n ------\n BatchSizeNotFoundError\n No default batch size has been found to perform local prediction.\n Please override the predict function of your algorithm.\n \"\"\"\n # Note that we don't simply inherit from the method from FedAvgTorchAlgo\n # because it assumes the existence of the NpIndexGenerator\n\n predict_loader = torch.utils.data.DataLoader(\n predict_dataset, batch_size=self.batch_size, shuffle=False, drop_last=False\n )\n\n self._model.eval()\n\n predictions = []\n with torch.no_grad():\n for x in predict_loader:\n x = x.to(self._device)\n predictions.append(self._model(x))\n 
predictions = torch.cat(predictions, 0)\n predictions = predictions.cpu().detach()\n if return_predictions:\n return predictions\n else:\n self._save_predictions(predictions, predictions_path)\n\n def _get_state_to_save(self) -> dict:\n \"\"\"Get all attibutes to save and pass on to next state.\n\n Returns\n -------\n dict\n The dict with all quantities to persist.\n \"\"\"\n checkpoint = super()._get_state_to_save()\n\n list_attrs_to_save = [\n \"dp_max_grad_norm\",\n \"dp_target_epsilon\",\n \"dp_target_delta\",\n \"num_rounds\",\n \"num_updates\",\n \"num_total_steps\",\n \"batch_size\",\n ]\n list_of_attrs_after_train = [\n \"noise_multiplier\",\n \"sample_rate\",\n \"size_train_dataset\",\n ]\n # For some reason this method is called before ever calling train so\n # at first it doesn't have an accountant\n if hasattr(self, \"accountant\"):\n checkpoint[\"privacy_accountant_state_dict\"] = self.accountant.state_dict()\n list_attrs_to_save += list_of_attrs_after_train\n\n for attr in list_attrs_to_save:\n checkpoint[attr] = getattr(self, attr)\n\n return checkpoint\n\n def _update_from_checkpoint(self, path) -> dict:\n \"\"\"Set self attributes using saved values.\n\n Parameters\n ----------\n path : Path\n Path towards the checkpoint to use.\n\n Returns\n -------\n dict\n The emptied checkpoint.\n \"\"\"\n # One cannot simply call checkpoint = super()._update_from_checkpoint(path)\n # because we have to change the model class if it should be changed\n # (and optimizer) aka if we find a specific key in the checkpoint\n assert (\n path.is_file()\n ), f'Cannot load the model - does not exist {list(path.parent.glob(\"*\"))}'\n checkpoint = torch.load(path, map_location=self._device)\n # For some reason substrafl save and load client before calling train\n if \"privacy_accountant_state_dict\" in checkpoint:\n self.accountant = RDPAccountant()\n self.accountant.load_state_dict(\n checkpoint.pop(\"privacy_accountant_state_dict\")\n )\n self.sample_rate = checkpoint.pop(\"sample_rate\")\n self.size_train_dataset = checkpoint.pop(\"size_train_dataset\")\n self.noise_multiplier = checkpoint.pop(\"noise_multiplier\")\n # The init is messing up the fact that the model has become\n # a grad sampler and the optimizer a DPOptimizer, their classes\n # do not persist between serializations\n # Those lines will allow to load corresponding state_dicts wo errors\n if not isinstance(self._model, GradSampleModule):\n self._model = wrap_model(self._model, grad_sample_mode=\"hooks\")\n\n if not isinstance(self._optimizer, DPOptimizer):\n self._optimizer = DPOptimizer(\n self._optimizer,\n noise_multiplier=self.noise_multiplier,\n max_grad_norm=self.dp_max_grad_norm,\n expected_batch_size=self.batch_size,\n )\n\n self._optimizer.attach_step_hook(\n self.accountant.get_optimizer_hook_fn(self.sample_rate)\n )\n\n self._model.load_state_dict(checkpoint.pop(\"model_state_dict\"))\n\n if self._optimizer is not None:\n self._optimizer.load_state_dict(checkpoint.pop(\"optimizer_state_dict\"))\n\n if self._scheduler is not None:\n self._scheduler.load_state_dict(checkpoint.pop(\"scheduler_state_dict\"))\n\n self._index_generator = checkpoint.pop(\"index_generator\")\n\n if self._device == torch.device(\"cpu\"):\n torch.set_rng_state(checkpoint.pop(\"rng_state\").to(self._device))\n else:\n torch.cuda.set_rng_state(checkpoint.pop(\"rng_state\").to(\"cpu\"))\n\n attr_names = [\n \"dp_max_grad_norm\",\n \"dp_target_epsilon\",\n \"dp_target_delta\",\n \"num_rounds\",\n \"num_updates\",\n \"num_total_steps\",\n 
\"batch_size\",\n ]\n\n for attr in attr_names:\n setattr(self, attr, checkpoint.pop(attr))\n\n return checkpoint" }, { "identifier": "LogisticRegressionTorch", "path": "fedeca/fedeca_core.py", "snippet": "class LogisticRegressionTorch(nn.Module):\n \"\"\"Pytorch logistic regression class.\"\"\"\n\n def __init__(self, ndim, torch_dtype=torch.float64):\n \"\"\"Initialize Logistic Regression model in PyTorch.\n\n Parameters\n ----------\n ndim : int\n Number of input dimensions.\n torch_dtype : torch.dtype, optional\n Data type for PyTorch tensors, by default torch.float64.\n \"\"\"\n self.torch_dtype = torch_dtype\n self.ndim = ndim\n super(LogisticRegressionTorch, self).__init__()\n self.fc1 = nn.Linear(self.ndim, 1).to(self.torch_dtype)\n # Zero-init as in sklearn\n self.fc1.weight.data.fill_(0.0)\n self.fc1.bias.data.fill_(0.0)\n\n def forward(self, x, eval=False):\n \"\"\"Perform a forward pass through the Logistic Regression model.\n\n Parameters\n ----------\n x : torch.Tensor\n Input tensor of shape (batch_size, ndim).\n eval : bool, optional\n Set to True during evaluation, by default False.\n\n Returns\n -------\n torch.Tensor\n Predicted probabilities after passing through sigmoid activation.\n \"\"\"\n x = self.fc1(x)\n return torch.sigmoid(x)" }, { "identifier": "TestTempDir", "path": "fedeca/tests/common.py", "snippet": "class TestTempDir(unittest.TestCase):\n \"\"\"Base class for tests.\n\n Base class which should be used for every tests that need\n a temporary directory (to store data, logs etc).\n The directory is shared across the tests of a TestCase, and\n it's removed at the end of the TestCase (not at each test !).\n\n Attributes\n ----------\n test_dir: str\n the path to the temporary directory of the TestCase.\n\n Notes\n -----\n If the class methods setUpClass or tearDownClass are overridden,\n please make sure to call `super()...``\n \"\"\"\n\n _test_dir = None\n test_dir = None\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set up the class.\"\"\"\n super(TestTempDir, cls).setUpClass()\n cls._test_dir = tempfile.TemporaryDirectory()\n cls.test_dir = cls._test_dir.name # Keep a reference to the path\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Tear down the class.\"\"\"\n super(TestTempDir, cls).tearDownClass()\n # This function rm the directory\n cls._test_dir.cleanup()\n rmdir(Path(fedeca.__file__).parent.parent)" }, { "identifier": "Experiment", "path": "fedeca/utils/substrafl_utils.py", "snippet": "class Experiment:\n \"\"\"Experiment class.\"\"\"\n\n def __init__(\n self,\n strategies: list,\n num_rounds_list: list[int],\n ds_client=None,\n train_data_nodes: Union[list[TrainDataNode], None] = None,\n metrics_dicts_list: Union[list[dict], None] = None,\n test_data_nodes: Union[list[TestDataNode], None] = None,\n aggregation_node: Union[AggregationNode, None] = None,\n evaluation_frequency: Union[int, None] = None,\n experiment_folder: str = \"./experiments\",\n clean_models: bool = False,\n fedeca_path: Union[str, None] = None,\n algo_dependencies: Union[list, None] = None,\n ):\n \"\"\"Initialize an experiment.\n\n Parameters\n ----------\n ds_client : fl.client.Client\n Federated Learning client object used to register computations.\n strategies : list\n List of strategies to run.\n train_data_nodes : Union[list[TrainDataNode], None]\n List of data nodes for training. 
If None cannot use the run method\n directly.\n num_rounds_list : list\n List of number of rounds for each strategy.\n metrics_dicts_list : list[dict], optional\n Dict of metric functions, by default None.\n test_data_nodes : list, optional\n List of data nodes for testing, by default None.\n aggregation_node : fl.data.DataNode, optional\n Aggregation node, by default None.\n evaluation_frequency : int, optional\n Frequency of evaluation, by default 1.\n experiment_folder : str, optional\n Folder path for experiment outputs, by default \"./experiments\".\n clean_models : bool, optional\n Whether to clean models after training, by default False.\n fedeca_path : str, optional\n Path to the FedECA package, by default None.\n algo_dependencies : list, optional\n List of algorithm dependencies, by default [].\n \"\"\"\n if metrics_dicts_list is not None:\n assert len(strategies) == len(metrics_dicts_list)\n assert len(num_rounds_list) == len(strategies)\n self.strategies = strategies\n self.metrics_dicts_list = metrics_dicts_list\n self.num_rounds_list = num_rounds_list\n self.ds_client = ds_client\n self.train_data_nodes = train_data_nodes\n self.test_data_nodes = test_data_nodes\n self.simu_mode = False\n\n if self.test_data_nodes is None:\n assert metrics_dicts_list is not None\n if self.train_data_nodes is not None:\n self.test_data_nodes = [\n TestDataNode(\n t.organization_id, t.data_manager_key, t.data_sample_keys, []\n )\n for t in self.train_data_nodes\n ]\n else:\n if metrics_dicts_list and not all(\n [len(t.metric_functions) == 0 for t in self.test_data_nodes]\n ):\n print(\n \"\"\"WARNING: you are passing metrics to test data nodes with existing\n metric_functions this will overwrite them\"\"\"\n )\n print(\n [\n (f\"Client {i}\", t.metric_functions)\n for i, t in enumerate(self.test_data_nodes)\n ]\n )\n\n self.evaluation_frequency = evaluation_frequency\n\n self.aggregation_node = aggregation_node\n self.experiment_folder = experiment_folder\n self.clean_models = clean_models\n\n # Packaging the right dependencies\n if fedeca_path is None:\n fedeca_path = os.getcwd()\n repo_folder = Path(\n git.Repo(fedeca_path, search_parent_directories=True).working_dir\n ).resolve()\n wheel_folder = repo_folder / \"temp\"\n os.makedirs(wheel_folder, exist_ok=True)\n for stale_wheel in wheel_folder.glob(\"fedeca*.whl\"):\n stale_wheel.unlink()\n process = subprocess.Popen(\n f\"python -m build --wheel --outdir {wheel_folder} {repo_folder}\",\n shell=True,\n stdout=subprocess.PIPE,\n )\n process.wait()\n assert process.returncode == 0, \"Failed to build the wheel\"\n wheel_path = next(wheel_folder.glob(\"fedeca*.whl\"))\n if algo_dependencies is None:\n algo_dependencies = []\n\n self.algo_dependencies = Dependency(\n pypi_dependencies=[\"numpy==1.23.1\", \"torch==1.11.0\", \"lifelines\", \"pandas\"]\n + algo_dependencies,\n local_dependencies=[wheel_path],\n )\n\n self.experiment_path = str(Path(self.experiment_folder))\n os.makedirs(self.experiment_path, exist_ok=True)\n self.run_strategies = 0\n self.tasks = {}\n self.compute_plan_keys = []\n self.performances_strategies = []\n\n def fit(\n self,\n data: pd.DataFrame,\n nb_clients: Union[int, None] = None,\n split_method: Union[Callable, str] = \"uniform\",\n split_method_kwargs: Union[Callable, None] = None,\n data_path: Union[str, None] = None,\n backend_type: str = \"subprocess\",\n urls: Union[list[str], None] = None,\n tokens: Union[list[str], None] = None,\n ):\n \"\"\"Fit strategies on global data split across clients.\n\n For test if 
provided we use test_data_nodes from int or the\n train_data_nodes in the latter train=test.\n\n Parameters\n ----------\n data : pd.DataFrame\n The global data to be split has to be a dataframe as we only support\n one opener type.\n nb_clients : Union[int, None], optional\n The number of clients used to split data across, by default None\n split_method : Union[Callable, None], optional\n How to split data across the nb_clients, by default None.\n split_method_kwargs : Union[Callable, None], optional\n Argument of the function used to split data, by default None.\n data_path : Union[str, None]\n Where to store the data on disk when backend is not remote.\n backend_type: str\n The backend to use for substra. Can be either:\n [\"subprocess\", \"docker\", \"remote\"]. Defaults to \"subprocess\".\n urls: Union[list[str], None]\n Urls corresponding to clients API if using remote backend_type.\n Defaults to None.\n tokens: Union[list[str], None]\n Tokens necessary to authenticate each client API if backend_type\n is remote. Defauts to None.\n \"\"\"\n # Reset experiment so that it can fit on a new dataset\n self.reset_experiment()\n\n if data_path is not None:\n self.experiment_path = data_path\n\n # We first have to create the TrainDataNodes objects for this we split\n # the data into nb_clients using split_method\n (\n self.clients,\n self.train_data_nodes,\n test_data_nodes,\n _,\n _,\n ) = split_dataframe_across_clients(\n df=data,\n n_clients=nb_clients,\n split_method=split_method,\n split_method_kwargs=split_method_kwargs,\n backend_type=backend_type,\n data_path=data_path,\n urls=urls,\n tokens=tokens,\n )\n if self.test_data_nodes is None:\n self.test_data_nodes = test_data_nodes\n self.run()\n\n def run(self, num_strategies_to_run=None):\n \"\"\"Run the experiment.\n\n Parameters\n ----------\n num_strategies_to_run : int, optional\n Number of strategies to run, by default None.\n \"\"\"\n assert (\n self.train_data_nodes is not None\n ), \"you have to define train_data_nodes first before running\"\n assert (\n self.test_data_nodes is not None\n ), \"you have to define test_data_nodes first before running\"\n if num_strategies_to_run is None:\n num_strategies_to_run = len(self.strategies) - self.run_strategies\n assert (self.run_strategies + num_strategies_to_run) <= len(\n self.strategies\n ), f\"\"\"You cannot run {num_strategies_to_run} strategies more there is only\n {len(self.strategies)} strategies and you have already run {self.run_strategies}\n of them.\"\"\"\n # If no client is given we take the first one\n if self.ds_client is None:\n self.ds_client = self.clients[list(self.clients.keys())[0]]\n\n # If no AggregationNode is given we take the first one\n if self.aggregation_node is None:\n print(\"Using the first client as a server.\")\n kwargs_agg_node = {\n \"organization_id\": self.train_data_nodes[0].organization_id\n }\n self.aggregation_node = AggregationNode(**kwargs_agg_node)\n\n if not hasattr(self, \"experiment_kwargs\"):\n self.experiment_kwargs = {\n \"experiment_folder\": self.experiment_path,\n \"clean_models\": self.clean_models,\n \"dependencies\": self.algo_dependencies,\n \"client\": self.ds_client,\n }\n if hasattr(self.ds_client, \"is_simu\"):\n self.simu_mode = self.ds_client.is_simu\n\n # inelegant but cannot slice on a zip object\n strategies = self.strategies[\n self.run_strategies : (self.run_strategies + num_strategies_to_run)\n ] # noqa: E203\n metrics_dicts_list = self.metrics_dicts_list[\n self.run_strategies : (\n self.run_strategies + 
num_strategies_to_run\n ) # noqa: E203\n ]\n num_rounds_list = self.num_rounds_list[\n self.run_strategies : (\n self.run_strategies + num_strategies_to_run\n ) # noqa: E203\n ]\n for i, (strategy, metrics_dict, num_rounds) in enumerate(\n zip(strategies, metrics_dicts_list, num_rounds_list)\n ):\n for t in self.test_data_nodes:\n t.metric_functions = metrics_dict\n\n current_kwargs = self.experiment_kwargs\n current_kwargs[\"strategy\"] = strategy\n current_kwargs[\"num_rounds\"] = num_rounds\n current_kwargs[\"train_data_nodes\"] = self.train_data_nodes\n current_kwargs[\"aggregation_node\"] = self.aggregation_node\n # Evaluation frequency depend on current strategy\n # If None evaluate once at the end of the strategy\n if self.evaluation_frequency is None:\n evaluation_strategy = EvaluationStrategy(\n test_data_nodes=self.test_data_nodes,\n eval_rounds=[num_rounds_list[i]],\n )\n else:\n evaluation_strategy = EvaluationStrategy(\n test_data_nodes=self.test_data_nodes,\n eval_frequency=self.evaluation_frequency[i],\n )\n current_kwargs[\"evaluation_strategy\"] = evaluation_strategy\n current_kwargs[\"simu_mode\"] = self.simu_mode\n current_kwargs[\"name\"] = f\"Fedeca: {strategy.__class__.__name__}\"\n xp_output = execute_experiment(**current_kwargs)\n\n if self.simu_mode:\n scores = [t.scores for t in self.test_data_nodes]\n robust_cox_variance = False\n for idx, s in enumerate(scores):\n print(f\"====Client {idx}====\")\n try:\n print(s[-1])\n except IndexError:\n robust_cox_variance = True\n print(\"No metric\")\n # TODO Check that it is well formatted it's probably not\n self.performances_strategies.append(pd.DataFrame(xp_output))\n # Hacky hacky hack\n if robust_cox_variance:\n xp_output = self.train_data_nodes\n else:\n xp_output = self.train_data_nodes[0]\n\n self.compute_plan_keys.append(xp_output)\n\n if not (self.simu_mode):\n self.tasks[self.compute_plan_keys[i].key] = {}\n tasks = self.ds_client.list_task(\n filters={\"compute_plan_key\": [self.compute_plan_keys[i].key]}\n )[::-1]\n tasks_names = [t.function.name for t in tasks]\n self.tasks[self.compute_plan_keys[i].key][\"tasks\"] = tasks\n self.tasks[self.compute_plan_keys[i].key][\"tasks_names\"] = tasks_names\n self.tasks[self.compute_plan_keys[i].key][\"num_tasks\"] = len(tasks)\n\n self.run_strategies += 1\n\n def get_outmodel(self, task_name, strategy_idx=0, idx_task=0):\n \"\"\"Get the output model.\n\n Parameters\n ----------\n task_name : str\n Name of the task.\n strategy_idx : int, optional\n Index of the strategy, by default 0.\n idx_task : int, optional\n Index of the task, by default 0.\n \"\"\"\n assert not (self.simu_mode), \"This function cannot be used in simu mode\"\n\n # We get all matches and order them chronologically\n tasks_dict_from_strategy = self.tasks[self.compute_plan_keys[strategy_idx].key]\n return get_outmodel_function(\n task_name, idx_task=idx_task, tasks_dict=tasks_dict_from_strategy\n )\n\n def reset_experiment(self):\n \"\"\"Reset the state of the object.\n\n So it can be fit with a new dataset.\n \"\"\"\n self.run_strategies = 0\n self.tasks = {}\n self.compute_plan_keys = []\n self.performances_strategies = []\n self.train_data_nodes = None\n self.test_data_nodes = None" }, { "identifier": "make_substrafl_torch_dataset_class", "path": "fedeca/utils/substrafl_utils.py", "snippet": "def make_substrafl_torch_dataset_class(\n target_cols,\n event_col,\n duration_col,\n dtype=\"float64\",\n return_torch_tensors=False,\n):\n \"\"\"Create a custom SubstraflTorchDataset class for survival 
analysis.\n\n Parameters\n ----------\n target_cols : list\n List of target columns.\n event_col : str\n Name of the event column.\n duration_col : str\n Name of the duration column.\n dtype : str, optional\n Data type, by default \"float64\".\n return_torch_tensors : bool, optional\n Returns torch.Tensor. Defaults to False.\n\n Returns\n -------\n type\n Custom SubstraflTorchDataset class.\n \"\"\"\n assert len(target_cols) == 1 or all(\n [t in [event_col, duration_col] for t in target_cols]\n )\n if len(target_cols) == 1:\n print(f\"Making a dataset class to fit a model to predict {target_cols[0]}\")\n columns_to_drop = [event_col, duration_col]\n elif len(target_cols) == 2:\n assert set(target_cols) == set(\n [event_col, duration_col]\n ), \"Your targets should be event_col and duration_col\"\n # DO NOT MODIFY THIS LINE !!!!!\n target_cols = [duration_col, event_col]\n columns_to_drop = []\n\n class MySubstraflTorchDataset(SubstraflTorchDataset):\n def __init__(self, datasamples, is_inference):\n super().__init__(\n datasamples=datasamples,\n is_inference=is_inference,\n target_columns=target_cols,\n columns_to_drop=columns_to_drop,\n dtype=dtype,\n return_torch_tensors=return_torch_tensors,\n )\n\n return MySubstraflTorchDataset" }, { "identifier": "make_accuracy_function", "path": "fedeca/utils/substrafl_utils.py", "snippet": "def make_accuracy_function(treatment_col: str):\n \"\"\"Build accuracy function.\n\n Parameters\n ----------\n treatment_col: str,\n Column name for the treatment allocation.\n \"\"\"\n\n def accuracy(datasamples, predictions_path):\n y_true = datasamples[treatment_col]\n if isinstance(predictions_path, str) or isinstance(predictions_path, Path):\n y_pred = np.load(predictions_path)\n else:\n y_pred = predictions_path\n return accuracy_score(y_true, y_pred > 0.5)\n\n return accuracy" }, { "identifier": "CoxData", "path": "fedeca/utils/survival_utils.py", "snippet": "class CoxData:\n \"\"\"Simulate Cox data.\n\n This class simulates survival data following Cox model assumptions.\n \"\"\"\n\n def __init__(\n self,\n n_samples: int = 1000,\n ndim: int = 10,\n features_type: Literal[\n \"cov_toeplitz\",\n \"cov_uniform\",\n \"indep_gauss\",\n ] = \"cov_toeplitz\",\n cate: float | Literal[\"random\", \"linear\"] = 1.0,\n propensity: Literal[\"constant\", \"linear\"] = \"constant\",\n prop_treated: float = 0.5,\n overlap: float = 0.0,\n cov_corr: float = 0.5,\n scale_t: float = 1.0,\n shape_t: float = 1.0,\n censoring_factor: float = 0.5,\n percent_ties: Optional[float] = None,\n random_censoring: bool = False,\n seed: _SeedType = None,\n standardize_features: bool = True,\n dtype: Literal[\"float32\", \"float64\"] = \"float64\",\n ):\n r\"\"\"Cox Data generator class.\n\n This class generates data according to a Cox proportional hazards model\n in continuous time as follows:\n .. math::\n S(t|x) = P(T > t | X=x)\n \\\\lambda(t|x) = \\\\frac{d \\\\log S(t|x)}{dt}\n \\\\lambda(t|x) = \\\\lambda_0(t)e^{\\\\beta^T x}\n \\\\Lambda_0(t|x) = \\\\int_0^t \\\\lambda_0(u)du = (\\\\frac{t}{s})^k\n X \\\\sim \\\\mathcal{N}(0, C)\n \\\\beta \\\\sim \\\\mathcal{N}(0, I)\n\n Parameters\n ----------\n n_samples: int, optional\n Number of samples to generate. 
Defaults to 1000\n ndim: int, optional\n Number of features, defaults to 10.\n features_type: `{\"cov_toeplitz\", \"cov_uniform\", \"indep_gauss\"}`, optional\n cate: {float, `{\"random\", \"linear\"}`, Callable}\n The way to assign treatment effect (hazard ratio) to samples.\n * \"float\": Constant hazard ratio for all samples.\n * \"random\": Hazard ratio follows log-normal distribution.\n * \"linear\": Hazard ratio depends on a linear combination of\n features with random coefficients.\n Defaults to 1.0 (no treatment effect).\n propensity: {`{\"constant\", \"linear\"}`, Callable}\n The way to assign propensity scores (probabilities of being treated)\n to samples.\n * \"linear\": Propensity scores depend on a linear combination of\n features with random coefficients.\n * \"constant\": All propensity scores take the value of the constant\n defined by the parameter `prop_treated`.\n Defaults to \"constant\".\n cov_corr: float, optional\n The correlation of the covariance matrix.\n scale_t: float, optional\n Scale parameter `s` in the equations above. Defaults to `1.0`.\n shape_t: float, optional\n Shape parameter `k` in the equations above. Defaults to `1.0`.\n censoring_factor: float, optional\n Parameter used to determine the probability of being censored\n (with respect to the median). Defaults to `0.5`.\n percent_ties: float, optional\n Parameter that control the percentage of samples who have the same outcome.\n Defaults to None.\n random_censoring: bool, optional\n Whether to censor completely independently of the rest or not.\n When true, censors samples with probability censoring_factor.\n When false, samples are censored if the drawn event times\n (drawn from the Cox model) is smaller than an independent\n exponential variable with scale factor\n `censoring_factor * mean_time`, where `mean_time`\n is the empirical mean of drawn event times.\n Defaults to False.\n seed: {None, int, Sequence[int], SeedSequence, BitGenerator, Generator},\n optional\n The seed for reproducibility. Defaults to None.\n standardize_features: bool, optional\n Whether to standardize features or not. Defaults to True.\n dtype : `{\"float64\", \"float32\"}`, default=\"float64\"\n Type of the arrays used.\n \"\"\"\n self.n_samples = n_samples\n self.ndim = ndim\n self.features_type: Final = features_type\n self.rng = np.random.default_rng(seed)\n self.prop_treated = prop_treated\n self.overlap = overlap\n self.cate = cate\n self.propensity = propensity\n self.cov_corr = cov_corr\n self.scale_t = scale_t\n self.shape_t = shape_t\n self.censoring_factor = censoring_factor\n self.random_censoring = random_censoring\n self.standardize_features = standardize_features\n self.dtype: Final = dtype\n self.coeffs = None\n self.percent_ties = percent_ties\n self.average_treatment_effect_ = None\n self.probability_treated = None\n\n def standardize_data(self, features: np.ndarray):\n \"\"\"Standardize data. 
Make data reduced centered.\n\n Standardize the data by substracting the mean of each columns\n and dividing by the standard deviation.\n\n Parameters\n ----------\n features : np.ndarray\n Features to standardize.\n\n Returns\n -------\n np.ndarray\n Normalized features.\n \"\"\"\n features -= features.mean(axis=0)\n features /= features.std(axis=0)\n return features\n\n def generate_data(\n self,\n n_samples: Optional[int] = None,\n seed: _SeedType = None,\n use_cate: bool = True,\n ):\n \"\"\"Generate final survival data.\n\n Use the collection of methods of the class to\n generate data following Cox assumptions.\n\n Returns\n -------\n tuple\n A tuple of np.ndarrays.\n\n Raises\n ------\n ValueError\n If `propensity` is neither \"constant\" nor \"linear\".\n ValueError\n If `cate` is neither \"linear\", \"random\" nor a constant type int or float.\n \"\"\"\n if n_samples is None:\n n_samples = self.n_samples\n if seed is None:\n seed = self.rng\n rng = np.random.default_rng(seed)\n\n if self.features_type == \"cov_uniform\":\n X = features_normal_cov_uniform(\n n_samples, self.ndim, dtype=self.dtype, seed=rng\n )\n elif self.features_type == \"indep_gauss\":\n X = rng.standard_normal(size=(n_samples, self.ndim)).astype(self.dtype)\n else:\n X = features_normal_cov_toeplitz(\n n_samples, self.ndim, self.cov_corr, dtype=self.dtype, seed=rng\n )\n if self.standardize_features:\n X = self.standardize_data(X)\n\n if self.propensity == \"constant\":\n treat_alloc = random_treatment_allocation(\n n_samples, self.prop_treated, seed=rng\n )\n propensity_scores = np.repeat(self.prop_treated, n_samples)\n\n elif self.propensity == \"linear\":\n func_propensity = linear_propensity(\n ndim=self.ndim,\n overlap=self.overlap,\n prop_treated=self.prop_treated,\n seed=rng,\n )\n propensity_scores = np.apply_along_axis(func_propensity, -1, X)\n treat_alloc = rng.binomial(1, propensity_scores)\n else:\n raise ValueError(\"propensity must be either `constant` or `linear`\")\n\n self.coeffs = rng.normal(size=(self.ndim,)).astype(self.dtype)\n u = X.dot(self.coeffs)\n if use_cate:\n if self.cate == \"linear\":\n func_cate = linear_cate(ndim=self.ndim, seed=rng)\n elif self.cate == \"random\":\n func_cate = random_cate(seed=rng)\n elif isinstance(self.cate, (int, float)):\n func_cate = constant_cate(self.cate)\n else:\n raise ValueError(\n \"\"\"cate must be either `linear`, `random` or a constant type\n int or float\"\"\"\n )\n\n cate_vector = np.apply_along_axis(func_cate, -1, X)\n self.average_treatment_effect_ = np.mean(cate_vector[treat_alloc == 1])\n self.probability_treated = cate_vector\n u += treat_alloc * np.log(cate_vector)\n # Simulation of true times\n time_hazard_baseline = -np.log(\n rng.uniform(0, 1.0, size=n_samples).astype(self.dtype)\n )\n time_cox_unscaled = time_hazard_baseline * np.exp(-u)\n times = self.scale_t * time_cox_unscaled ** (1.0 / self.shape_t)\n\n # induce samples with same times\n if self.percent_ties is not None:\n nb_ties_target = int(self.percent_ties * n_samples)\n if nb_ties_target >= 2:\n # sklearn not supporting generator yet, pass int to random_state\n # ref: https://github.com/scikit-learn/scikit-learn/issues/16988\n seed_seq = rng.bit_generator._seed_seq.spawn(1)[0] # type: ignore\n random_state = seed_seq.generate_state(1)[0]\n original_times = copy.deepcopy(times)\n # We progressively reduce the number of bins until there are\n # only 2 bins starting with npoints - 1 bins\n reached = False\n for nbins in range(n_samples - 1, 1, -1):\n discretizer = 
KBinsDiscretizer(\n n_bins=nbins,\n encode=\"ordinal\",\n strategy=\"quantile\",\n random_state=random_state,\n )\n times = discretizer.fit_transform(original_times.reshape((-1, 1)))\n nb_ties_reached = n_samples - len(np.unique(times))\n if (nb_ties_reached - nb_ties_target) >= 0:\n reached = True\n break\n if not reached:\n raise ValueError(\"This should not happen, lower percent_ties\")\n times = times.reshape((-1))\n\n else:\n raise ValueError(\"Choose a larger number of ties\")\n\n avg_time = times.mean()\n\n # Simulation of the censoring times. times is returned in absolute value\n if self.random_censoring:\n censoring = rng.uniform(size=n_samples) < self.censoring_factor\n times[censoring] = [rng.uniform(0, t) for t in times[censoring].tolist()]\n censoring = censoring.astype(\"uint8\")\n else:\n c_sampled = rng.exponential(\n scale=self.censoring_factor * avg_time, size=n_samples\n ).astype(self.dtype)\n\n censoring = (times > c_sampled).astype(\"uint8\")\n times[censoring] = np.minimum(times, c_sampled)\n\n return X, times, censoring, treat_alloc, propensity_scores\n\n def generate_dataframe(\n self,\n n_samples: Optional[int] = None,\n prefix: str = \"X_\",\n duration_col: str = \"time\",\n event_col: str = \"event\",\n treated_col: str = \"treatment\",\n ps_col: str = \"propensity_scores\",\n seed: _SeedType = None,\n ):\n \"\"\"Generate dataframe.\"\"\"\n (\n covariates,\n times,\n censoring,\n treatments,\n propensity_scores,\n ) = self.generate_data(n_samples, seed=seed)\n data = pd.DataFrame(covariates).add_prefix(prefix)\n data[duration_col] = times\n data[event_col] = 1 - censoring\n data[treated_col] = treatments\n data[ps_col] = propensity_scores\n return data" }, { "identifier": "make_categorical", "path": "fedeca/utils/survival_utils.py", "snippet": "def make_categorical(X, up_to: int = 25, seed: _SeedType = None):\n \"\"\"Convert continuous features in a dataset to categorical features.\n\n This function takes a dataset matrix `X` and converts its first `up_to` columns\n (features) into categorical features using the KBinsDiscretizer method.\n It performs min-max scaling on each feature before discretization.\n\n Parameters\n ----------\n X : np.ndarray\n Input dataset matrix of shape (n_samples, n_features).\n up_to : int, optional\n Number of columns to convert to categorical features, by default 25.\n seed : int or None, optional\n Seed for the random number generator, by default None.\n\n Returns\n -------\n np.ndarray, np.ndarray\n Two arrays: `Xleft` containing the modified categorical features\n and `Xright` containing the remaining original features.\n \"\"\"\n rng = np.random.default_rng(seed)\n Xleft = X[:, :up_to]\n Xright = X[:, up_to:]\n mm_normalizer = MinMaxScaler()\n nbins_vector = rng.integers(2, 10, size=up_to)\n for j, nbins in enumerate(nbins_vector):\n # sklearn not supporting generator yet, pass int to random_state\n # ref: https://github.com/scikit-learn/scikit-learn/issues/16988\n seed_seq = rng.bit_generator._seed_seq.spawn(1)[0] # type: ignore\n random_state = seed_seq.generate_state(1)[0]\n discretizer = KBinsDiscretizer(\n n_bins=nbins, encode=\"ordinal\", random_state=random_state\n )\n Xleft[:, j] = mm_normalizer.fit_transform(Xleft[:, j][:, None])[:, 0]\n Xleft[:, j] = discretizer.fit_transform(Xleft[:, j][:, None])[:, 0]\n return Xleft, Xright" } ]
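The CoxData snippet above draws event times by inverse-transform sampling of a Cox model with Weibull baseline cumulative hazard Lambda_0(t) = (t/s)^k. The following standalone NumPy sketch reproduces just that sampling step; the array names and toy sizes are mine, and scale_t and shape_t are left at the snippet's defaults of 1.0.

```python
# Sketch only: mirrors the event-time draw inside CoxData.generate_data above.
# T = scale_t * (-log(U) * exp(-X @ beta)) ** (1 / shape_t), with U ~ Uniform(0, 1).
import numpy as np

rng = np.random.default_rng(0)
n_samples, ndim = 5, 3                      # toy sizes (hypothetical)
X = rng.standard_normal((n_samples, ndim))  # covariates
beta = rng.normal(size=ndim)                # random coefficients, as in the snippet
u = X @ beta                                # linear predictor beta^T x
baseline = -np.log(rng.uniform(0.0, 1.0, size=n_samples))  # baseline cumulative hazard draw
times = 1.0 * (baseline * np.exp(-u)) ** (1.0 / 1.0)       # scale_t = shape_t = 1.0
print(times)
```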
import numpy as np import pandas as pd import torch import torch.nn as nn from substrafl.strategies import FedAvg from torch.optim import SGD from fedeca.algorithms.torch_dp_fed_avg_algo import TorchDPFedAvgAlgo from fedeca.fedeca_core import LogisticRegressionTorch from fedeca.tests.common import TestTempDir from fedeca.utils import ( Experiment, make_accuracy_function, make_substrafl_torch_dataset_class, ) from fedeca.utils.survival_utils import CoxData, make_categorical
11,906
"""Tests for DP training.""" # from substrafl.model_loading import download_algo_state # TODO increase rounds and an an assert to pooled equivalence as in # aper simulations class TestDPPropensityEnd2End(TestTempDir): """Webdisco tests class.""" @classmethod def setUpClass( cls, n_clients=3, ndim=10, nsamples=300, seed=43, ): """Initialize tests with data and FedIPTW object. Parameters ---------- n_clients : int, optional The number of clients, by default 3 nsamples : int, optional The number of patients in total. ndim : int, optional The number of dimensions, by default 10 initial_step_size : float, optional The first step size of NR descent, by default 0.95 seed : int, optional The seed, by default 43 standardize_data : bool, optional Whether or not to standardize data, by default True l1_ratio : float, optional The l1 ratio wrt L2., by default 0.0 penalizer : float, optional The weight for the elasticnet penalty, by default 0.0 learning_rate_strategy : str, optional How do we decrease the lr, by default "lifelines" """ super().setUpClass() cls.n_clients = n_clients rng = np.random.default_rng(seed) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=nsamples, ndim=ndim, prop_treated=0.5, propensity="linear", dtype="float32", # Strong linearity overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical
"""Tests for DP training.""" # from substrafl.model_loading import download_algo_state # TODO increase rounds and an an assert to pooled equivalence as in # aper simulations class TestDPPropensityEnd2End(TestTempDir): """Webdisco tests class.""" @classmethod def setUpClass( cls, n_clients=3, ndim=10, nsamples=300, seed=43, ): """Initialize tests with data and FedIPTW object. Parameters ---------- n_clients : int, optional The number of clients, by default 3 nsamples : int, optional The number of patients in total. ndim : int, optional The number of dimensions, by default 10 initial_step_size : float, optional The first step size of NR descent, by default 0.95 seed : int, optional The seed, by default 43 standardize_data : bool, optional Whether or not to standardize data, by default True l1_ratio : float, optional The l1 ratio wrt L2., by default 0.0 penalizer : float, optional The weight for the elasticnet penalty, by default 0.0 learning_rate_strategy : str, optional How do we decrease the lr, by default "lifelines" """ super().setUpClass() cls.n_clients = n_clients rng = np.random.default_rng(seed) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=nsamples, ndim=ndim, prop_treated=0.5, propensity="linear", dtype="float32", # Strong linearity overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical
Xcat, Xcont = make_categorical(X, up_to=0)
7
2023-11-27 18:01:37+00:00
16k
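For reference, this row's next_line follows directly from the setUpClass code in cropped_code: the test simulates survival data with CoxData and then converts the leading columns to ordinal categories with make_categorical. A hedged usage sketch, assuming the fedeca package is importable and with argument values simply mirroring the test above:

```python
# Usage sketch mirroring TestDPPropensityEnd2End.setUpClass above; not an official example.
import numpy as np
from fedeca.utils.survival_utils import CoxData, make_categorical

rng = np.random.default_rng(43)
simu_coxreg = CoxData(
    n_samples=300, ndim=10, prop_treated=0.5, propensity="linear",
    dtype="float32", overlap=100.0, seed=rng,
    random_censoring=True, censoring_factor=0.3, standardize_features=False,
)
X, T, C, treated, _ = simu_coxreg.generate_data()  # covariates, times, censoring, treatment, propensity
Xcat, Xcont = make_categorical(X, up_to=0)          # up_to=0 keeps every column continuous
```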
cmu-ci-lab/volumetric_opaque_solids
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.scale_mats_np = []\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = []\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n P = world_mat @ scale_mat\n P = P[:3, :4]\n intrinsics, pose = load_K_Rt_from_P(None, P)\n self.intrinsics_all.append(torch.from_numpy(intrinsics).float())\n self.pose_all.append(torch.from_numpy(pose).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n\n print('Load data: End')\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, 
keepdim=True) # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n pixels_x = torch.randint(low=0, high=self.W, size=[batch_size])\n pixels_y = torch.randint(low=0, high=self.H, size=[batch_size])\n color = self.images[img_idx][(pixels_y.cpu(), pixels_x.cpu())] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y.cpu(), pixels_x.cpu())] # batch_size, 3\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)" }, { "identifier": "PointSampler", "path": "models/sampler.py", "snippet": "class PointSampler:\n def __init__(self,\n n_sdf_pts = 1024,\n n_fg_samples = 28,\n n_surf_samples = 8,\n n_bg_samples = 28,\n n_outside = 32,\n use_random_binary_search = False,\n use_sdf_offset = False):\n # number of initial evaluations of sdf along each ray\n 
self.n_sdf_pts = n_sdf_pts\n self.n_sdf_samples = 10\n\n # number of points sampled per ray in the foreground, surface interval, and background\n self.n_fg_samples = n_fg_samples\n self.n_surf_samples = n_surf_samples\n self.n_bg_samples = n_bg_samples\n\n # total number of (primary, non-background) samples along each ray\n self.n_total_samples = n_fg_samples + n_surf_samples + n_bg_samples\n\n # number of points sampled per ray in background\n self.n_outside = n_outside\n\n self.use_random_binary_search = use_random_binary_search\n self.use_sdf_offset = use_sdf_offset\n\n def eval_at_points(self, rays_o, rays_d, depth, f):\n pts = rays_o[:, None, :] + rays_d[:, None, :] * depth[:, :, None]\n with torch.no_grad():\n val = f(pts.reshape(-1, 3)).reshape(depth.shape[0], depth.shape[1]).squeeze(dim=-1)\n return val\n\n def sample_interval_with_random_binary_search(self, num_samples, start, stop, rays_o, rays_d, f):\n '''\n Performs a random binary search for the x such that f(x) = 0 given f(start) > 0 and f(stop) < 0\n returns the entire sequence of sampled points, sorted from smallest to largest z val.\n '''\n current_min, current_max = start, stop\n samples = torch.zeros((start.shape[0], num_samples))\n uniform_random = torch.rand(samples.shape)\n for i in range(num_samples):\n samples[:, i] = (current_max - current_min) * uniform_random[:, i] + current_min\n f_val = self.eval_at_points(rays_o, rays_d, samples[:, i].unsqueeze(dim=1), f)\n current_min = torch.where(f_val <= 0, current_min, samples[:, i])\n current_max = torch.where(f_val <= 0, samples[:, i], current_max)\n return torch.sort(samples)[0]\n \n def sample_interval_uniformly(self, n, start, stop):\n start = start if len(start.shape) == 1 else start.squeeze(dim=-1)\n stop = stop if len(stop.shape) == 1 else stop.squeeze(dim=-1)\n x = torch.linspace(0, 1.0 - 1.0 / n, n)[None, :]\n x = x * (stop - start)[:, None] + start[:, None]\n x += (torch.rand(start.shape[0]) * (stop - start) / n)[:, None]\n return x\n\n def _dense_sdf_evaluation(self, rays_o, rays_d, near, far, sdf_func):\n uniform_z = torch.linspace(0.0, 1.0, self.n_sdf_pts + 1)\n z = near + (far - near) * uniform_z[None, :]\n return z, self.eval_at_points(rays_o, rays_d, z, sdf_func)\n\n def _find_first_zero_crossing(self, sdf):\n prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]\n sign_change = (next_sdf * prev_sdf < 0).long()\n return sign_change.argmax(1).long()\n\n def _compute_surface_z_bound(self, isect_idx, z, near, far):\n z_bounds = torch.gather(z, dim=1, index=torch.cat([isect_idx[:, None], isect_idx[:, None]+1], dim=1)).squeeze(dim=-1)\n return z_bounds[:, 0], z_bounds[:, 1]\n\n def sample_intersection(self, rays_o, rays_d, near, far, sdf_func, inv_std):\n with torch.no_grad():\n z, sdf = self._dense_sdf_evaluation(rays_o, rays_d, near, far, sdf_func)\n if self.use_sdf_offset:\n sdf += torch.normal(0, 1.0 / inv_std)\n\n isect_idx = self._find_first_zero_crossing(sdf) \n surf_lower, surf_upper = self._compute_surface_z_bound(isect_idx, z, near, far)\n \n has_isect = (isect_idx > 0).bool()\n no_isect = torch.logical_not(has_isect)\n\n # final depth samples buffers\n z_vals = torch.empty((rays_o.shape[0], self.n_total_samples))\n\n # depth map for visualization\n surf_z_image = torch.zeros_like(rays_o)\n\n if torch.any(has_isect):\n fg_z = self.sample_interval_uniformly(self.n_fg_samples, near[has_isect], surf_lower[has_isect])\n bg_z = self.sample_interval_uniformly(self.n_bg_samples, surf_upper[has_isect], far[has_isect])\n if not self.use_random_binary_search:\n surf_z 
= self.sample_interval_uniformly(self.n_surf_samples, surf_lower[has_isect], surf_upper[has_isect])\n else:\n surf_z = self.sample_interval_with_random_binary_search(self.n_surf_samples,\n surf_lower[has_isect],\n surf_upper[has_isect],\n rays_o[has_isect],\n rays_d[has_isect],\n sdf_func)\n z_vals[has_isect, :] = torch.cat([fg_z, surf_z, bg_z], dim=-1)\t\n \n # return z-val in image for debugging\n surf_lower_unit_z = (surf_lower - near.squeeze()) / (far - near).squeeze()\n surf_z_image[has_isect, :] = surf_lower_unit_z[has_isect, None].repeat(1, 3)\n\n if torch.any(no_isect):\n z_vals[no_isect, :] = self.sample_interval_uniformly(self.n_total_samples, near[no_isect], far[no_isect])\n \n return z_vals, surf_z_image \n\n def sample_outside(self, rays_o, rays_d, far):\t\n # Same as NeuS: https://github.com/Totoro97/NeuS/blob/6f96f96005d72a7a358379d2b576c496a1ab68dd/models/renderer.py#L292C19-L313\n if self.n_outside == 0:\n return None\n batch_size = len(rays_o)\n z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside)\n mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1])\n upper = torch.cat([mids, z_vals_outside[..., -1:]], -1)\n lower = torch.cat([z_vals_outside[..., :1], mids], -1)\n t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]])\n z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand\n z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_total_samples\n return z_vals_outside" }, { "identifier": "RenderingNetwork", "path": "models/fields.py", "snippet": "class RenderingNetwork(nn.Module):\n def __init__(self,\n d_feature,\n mode,\n d_in,\n d_out,\n d_hidden,\n n_layers,\n weight_norm=True,\n multires_view=0,\n squeeze_out=True):\n super().__init__()\n\n self.mode = mode\n self.squeeze_out = squeeze_out\n dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]\n\n self.embedview_fn = None\n if multires_view > 0:\n embedview_fn, input_ch = get_embedder(multires_view)\n self.embedview_fn = embedview_fn\n dims[0] += (input_ch - 3)\n\n self.num_layers = len(dims)\n\n for l in range(0, self.num_layers - 1):\n out_dim = dims[l + 1]\n lin = nn.Linear(dims[l], out_dim)\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.relu = nn.ReLU()\n\n def forward(self, points, normals, view_dirs, feature_vectors):\n if self.embedview_fn is not None:\n view_dirs = self.embedview_fn(view_dirs)\n\n rendering_input = None\n\n if self.mode == 'idr':\n rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_view_dir':\n rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_normal':\n rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)\n\n x = rendering_input\n\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.relu(x)\n\n if self.squeeze_out:\n x = torch.sigmoid(x)\n return x" }, { "identifier": "SDFNetwork", "path": "models/fields.py", "snippet": "class SDFNetwork(nn.Module):\n def __init__(self,\n d_in,\n d_out,\n d_hidden,\n n_layers,\n skip_in=(4,),\n multires=0,\n bias=0.5,\n scale=1,\n geometric_init=True,\n weight_norm=True,\n inside_outside=False):\n super(SDFNetwork, self).__init__()\n\n dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]\n\n self.embed_fn_fine = None\n if multires > 0:\n embed_fn, input_ch = 
get_embedder(multires, input_dims=d_in)\n self.embed_fn_fine = embed_fn\n dims[0] = input_ch\n\n self.num_layers = len(dims)\n self.skip_in = skip_in\n self.scale = scale\n\n for l in range(0, self.num_layers - 1):\n if l + 1 in self.skip_in:\n out_dim = dims[l + 1] - dims[0]\n else:\n out_dim = dims[l + 1]\n\n lin = nn.Linear(dims[l], out_dim)\n\n if geometric_init:\n if l == self.num_layers - 2:\n if not inside_outside:\n torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, -bias)\n else:\n torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, bias)\n elif multires > 0 and l == 0:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.constant_(lin.weight[:, 3:], 0.0)\n torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))\n elif multires > 0 and l in self.skip_in:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0)\n else:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.activation = nn.Softplus(beta=100)\n\n def forward(self, inputs):\n inputs = inputs * self.scale\n if self.embed_fn_fine is not None:\n inputs = self.embed_fn_fine(inputs)\n\n x = inputs\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n\n if l in self.skip_in:\n x = torch.cat([x, inputs], 1) / np.sqrt(2)\n\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.activation(x)\n return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1)\n\n def sdf(self, x):\n return self.forward(x)[:, :1]\n\n def sdf_hidden_appearance(self, x):\n return self.forward(x)\n\n def sdf_with_gradient(self, x):\n x.requires_grad_(True)\n sdf_out = self.forward(x)\n sdf = sdf_out[:, :1]\n features = sdf_out[:, 1:]\n d_output = torch.ones_like(sdf, requires_grad=False, device=sdf.device)\n gradients = torch.autograd.grad(\n outputs=sdf,\n inputs=x,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return sdf, features, gradients" }, { "identifier": "SingleVarianceNetwork", "path": "models/fields.py", "snippet": "class SingleVarianceNetwork(nn.Module):\n def __init__(self, init_val):\n super(SingleVarianceNetwork, self).__init__()\n self.register_parameter('variance', nn.Parameter(torch.tensor(init_val)))\n\n def forward(self, x):\n return torch.ones([len(x), 1]) * torch.exp(self.variance * 10.0)" }, { "identifier": "NeRF", "path": "models/fields.py", "snippet": "class NeRF(nn.Module):\n def __init__(self,\n D=8,\n W=256,\n d_in=3,\n d_in_view=3,\n multires=0,\n multires_view=0,\n output_ch=4,\n skips=[4],\n use_viewdirs=False):\n super(NeRF, self).__init__()\n self.D = D\n self.W = W\n self.d_in = d_in\n self.d_in_view = d_in_view\n self.input_ch = 3\n self.input_ch_view = 3\n self.embed_fn = None\n self.embed_fn_view = None\n\n if multires > 0:\n embed_fn, input_ch = get_embedder(multires, input_dims=d_in)\n self.embed_fn = embed_fn\n self.input_ch = input_ch\n\n if multires_view > 0:\n embed_fn_view, input_ch_view = get_embedder(multires_view, input_dims=d_in_view)\n self.embed_fn_view = embed_fn_view\n self.input_ch_view = input_ch_view\n\n self.skips = skips\n self.use_viewdirs = use_viewdirs\n\n self.pts_linears = 
nn.ModuleList(\n [nn.Linear(self.input_ch, W)] +\n [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + self.input_ch, W) for i in range(D - 1)])\n\n ### Implementation according to the official code release\n ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105)\n self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)])\n\n ### Implementation according to the paper\n # self.views_linears = nn.ModuleList(\n # [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)])\n\n if use_viewdirs:\n self.feature_linear = nn.Linear(W, W)\n self.alpha_linear = nn.Linear(W, 1)\n self.rgb_linear = nn.Linear(W // 2, 3)\n else:\n self.output_linear = nn.Linear(W, output_ch)\n\n def forward(self, input_pts, input_views):\n if self.embed_fn is not None:\n input_pts = self.embed_fn(input_pts)\n if self.embed_fn_view is not None:\n input_views = self.embed_fn_view(input_views)\n\n h = input_pts\n for i, l in enumerate(self.pts_linears):\n h = self.pts_linears[i](h)\n h = F.relu(h)\n if i in self.skips:\n h = torch.cat([input_pts, h], -1)\n\n if self.use_viewdirs:\n alpha = self.alpha_linear(h)\n feature = self.feature_linear(h)\n h = torch.cat([feature, input_views], -1)\n\n for i, l in enumerate(self.views_linears):\n h = self.views_linears[i](h)\n h = F.relu(h)\n\n rgb = self.rgb_linear(h)\n return alpha, rgb\n else:\n assert False" }, { "identifier": "AnisotropyNetwork", "path": "models/fields.py", "snippet": "class AnisotropyNetwork(nn.Module):\n def __init__(self, d_feature):\n super(AnisotropyNetwork, self).__init__()\n self.anisotropy_layer = nn.Linear(d_feature, 1)\n self.anisotropy_activation = lambda x: 1.0 - torch.sigmoid(x)\n \n def forward(self, x):\n out = self.anisotropy_layer(x)\n return self.anisotropy_activation(out)" }, { "identifier": "AttenuationCoefficient", "path": "models/attenuation_coefficient.py", "snippet": "class AttenuationCoefficient:\n def __init__(self, \n implicit_distribution = 'gaussian', \n normal_distribution = 'linear_mixture'):\n self.implicit_distribution = implicit_distribution\n self.normal_distribution = normal_distribution\n self.density = Density.get(implicit_distribution)\n self.projected_area = ProjectedArea.get(normal_distribution)\n\n def __call__(self, ray_dir, mean_implicit, grad_mean_implicit, inv_std, anisotropy_param):\n sigma_perp = self.projected_area(ray_dir, grad_mean_implicit, anisotropy_param)\n sigma_parallel = self.density(mean_implicit, inv_std)\n return sigma_perp * sigma_parallel" }, { "identifier": "Renderer", "path": "models/renderer.py", "snippet": "class Renderer:\n def __init__(self,\n nerf,\n sdf_network,\n deviation_network,\n color_network,\n anisotropy_network,\n attenuation_coefficient,\n sampler):\n self.nerf = nerf\n self.sdf_network = sdf_network\n self.deviation_network = deviation_network\n self.color_network = color_network\n self.anisotropy_network = anisotropy_network\n self.attenuation_coefficient = attenuation_coefficient\n self.sampler = sampler\n\n def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None):\n \"\"\"\n Render background\n \"\"\"\n batch_size, n_samples = z_vals.shape\n\n # section length\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)\n mid_z_vals = z_vals + dists * 0.5\n\n # section midpoints\n pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3\n\n 
dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10)\n pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4\n\n dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3)\n\n pts = pts.reshape(-1, 4)\n dirs = dirs.reshape(-1, 3)\n\n # query neural fields\n density, sampled_color = nerf(pts, dirs)\n sampled_color = torch.sigmoid(sampled_color)\n\n # compute alpha\n alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists)\n alpha = alpha.reshape(batch_size, n_samples)\n\n # aggregate along rays\n weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n sampled_color = sampled_color.reshape(batch_size, n_samples, 3)\n color = (weights[:, :, None] * sampled_color).sum(dim=1)\n if background_rgb is not None:\n color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True))\n\n return {\n 'color': color,\n 'sampled_color': sampled_color,\n 'alpha': alpha,\n 'weights': weights,\n }\n\n def render_core(self,\n rays_o,\n rays_d,\n z_vals,\n sample_dist,\n sdf_network,\n deviation_network,\n color_network,\n background_alpha=None,\n background_sampled_color=None,\n background_rgb=None,\n annealed_anisotropy=1.0):\n batch_size, n_samples = z_vals.shape\n\n # section length\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)\n mid_z_vals = z_vals + dists * 0.5\n\n # section midpoints\n pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3\n dirs = rays_d[:, None, :].expand(pts.shape)\n\n pts = pts.reshape(-1, 3)\n dirs = dirs.reshape(-1, 3)\n \n # query neural fields\n sdf, feature_vector, sdf_gradients = sdf_network.sdf_with_gradient(pts)\n sampled_color = color_network(pts, sdf_gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3)\n inv_s = deviation_network(sdf).clip(1e-6, 1e6)\n\n anisotropy_param = annealed_anisotropy\n if self.anisotropy_network is not None:\n anisotropy_param = self.anisotropy_network(feature_vector) \n\n # compute transmittance based on SOS\n interval_lengths = dists.reshape(-1, 1)\n sigma = self.attenuation_coefficient(dirs, sdf, sdf_gradients, inv_s, anisotropy_param)\n alpha = 1.0 - torch.exp(-sigma * interval_lengths).reshape(batch_size, n_samples)\n \n pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples)\n inside_sphere = (pts_norm < 1.0).float().detach()\n relax_inside_sphere = (pts_norm < 1.2).float().detach()\n\n # aggregate along rays\n if background_alpha is not None:\n alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere)\n alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1)\n sampled_color = sampled_color * inside_sphere[:, :, None] +\\\n background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None]\n sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1)\n\n weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1]\n weights_sum = weights.sum(dim=-1, keepdim=True)\n\n color = (sampled_color * weights[:, :, None]).sum(dim=1)\n if background_rgb is not None: # Fixed background, usually black\n color = color + background_rgb * (1.0 - weights_sum)\n\n # Eikonal loss\n gradient_error = (torch.linalg.norm(sdf_gradients.reshape(batch_size, n_samples, 3), ord=2,\n dim=-1) - 1.0) ** 2\n gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5)\n\n return {\n 'color': color,\n 'sdf': sdf,\n 'dists': dists,\n 'gradients': sdf_gradients.reshape(batch_size, n_samples, 3),\n 's_val': 1.0 / inv_s,\n 'mid_z_vals': mid_z_vals,\n 'weights': weights,\n 'gradient_error': gradient_error,\n 'inside_sphere': inside_sphere\n }\n\n def render(self, rays_o, rays_d, near, far, background_rgb=None, annealed_anisotropy=1.0):\n # sample points along rays\n inv_s = self.deviation_network(torch.tensor([1])).clip(1e-6, 1e6)\n z_vals, surf_z_image = self.sampler.sample_intersection(rays_o, rays_d, near, far, self.sdf_network.sdf, inv_s)\n z_vals_outside = self.sampler.sample_outside(rays_o, rays_d, far)\n sample_dist = 2.0 / self.sampler.n_total_samples\n\n background_alpha = None\n background_sampled_color = None\n\n # Background model\n if z_vals_outside is not None:\n z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1)\n z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1)\n ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf)\n \n background_sampled_color = ret_outside['sampled_color']\n background_alpha = ret_outside['alpha']\n\n # render core\n ret_fine = self.render_core(rays_o,\n rays_d,\n z_vals,\n sample_dist,\n self.sdf_network,\n self.deviation_network,\n self.color_network,\n background_rgb=background_rgb,\n background_alpha=background_alpha,\n background_sampled_color=background_sampled_color,\n annealed_anisotropy=annealed_anisotropy)\n\n batch_size = len(rays_o)\n color_fine = ret_fine['color']\n weights = ret_fine['weights']\n weights_sum = weights.sum(dim=-1, keepdim=True)\n gradients = ret_fine['gradients']\n s_val = ret_fine['s_val'].reshape(batch_size, self.sampler.n_total_samples).mean(dim=-1, keepdim=True)\n\n return {\n 'color_fine': color_fine,\n 's_val': s_val,\n 'weight_sum': weights_sum,\n 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0],\n 'gradients': gradients,\n 'weights': weights,\n 'gradient_error': ret_fine['gradient_error'],\n 'inside_sphere': ret_fine['inside_sphere'],\n 'depth': surf_z_image.detach().cpu().numpy()\n } " }, { "identifier": "read_mesh", "path": "models/util.py", "snippet": "def read_mesh(path, quad = False):\n V = []\n F = []\n with open(path) as file:\n for line in file:\n tokens = line.strip('\\n').split(' ')\n if tokens[0] == 'v':\n V.append(np.array([float(tokens[1]), float(tokens[2]), float(tokens[3])]))\n \n if tokens[0] == 'f':\n if quad:\n F.append(np.array([int(tokens[1]), int(tokens[2]), int(tokens[3]), int(tokens[4])]))\n else:\n F.append(np.array([int(tokens[1]), int(tokens[2]), int(tokens[3])]))\n\n return np.array(V), np.array(F)" }, { "identifier": "write_mesh", "path": "models/util.py", "snippet": "def write_mesh(path, vertices, faces, data, quad = False):\n with open(path, 'w') as out:\n out.write('# OBJ file\\n')\n\n for i in range(vertices.shape[0]):\n out.write('v {:.8f} {:.8f} {:.8f} \\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2]))\n\n for i in range(data.shape[0]):\n out.write('vt {:.8f} 0 \\n'.format(data[i]))\n\n for i in 
range(faces.shape[0]):\n fi = faces[i, 0]\n fj = faces[i, 1]\n fk = faces[i, 2]\n if quad:\n fl = faces[i, 3]\n out.write('f {:d}/{:d} {:d}/{:d} {:d}/{:d} {:d}/{:d}\\n'.format(fi, fi, fj, fj, fk, fk, fl, fl))\n else:\n out.write('f {:d}/{:d} {:d}/{:d} {:d}/{:d}\\n'.format(fi, fi, fj, fj, fk, fk))" }, { "identifier": "extract_geometry", "path": "models/util.py", "snippet": "def extract_geometry(bound_min, bound_max, resolution, threshold, query_func):\n print('threshold: {}'.format(threshold))\n u = extract_fields(bound_min, bound_max, resolution, query_func)\n vertices, triangles = mcubes.marching_cubes(u, threshold)\n b_max_np = bound_max.detach().cpu().numpy()\n b_min_np = bound_min.detach().cpu().numpy()\n vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :]\n return vertices, triangles" } ]
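The Renderer snippet above composites radiance with exponential transmittance over the sampled intervals: alpha_i = 1 - exp(-sigma_i * dist_i) and w_i = alpha_i * prod_{j<i}(1 - alpha_j). Below is a small self-contained PyTorch sketch of that weighting; the sigma values and interval lengths are random stand-ins, not outputs of the SDF or deviation networks.

```python
# Sketch of the compositing weights used in Renderer.render_core above; inputs are dummies.
import torch

batch_size, n_samples = 2, 6
sigma = 5.0 * torch.rand(batch_size, n_samples)               # stand-in attenuation coefficients
dists = torch.full((batch_size, n_samples), 2.0 / n_samples)  # stand-in section lengths
alpha = 1.0 - torch.exp(-sigma * dists)                       # per-interval opacity
trans = torch.cumprod(
    torch.cat([torch.ones(batch_size, 1), 1.0 - alpha + 1e-7], dim=-1), dim=-1
)[:, :-1]                                                     # transmittance up to each interval
weights = alpha * trans                                       # compositing weight per sample
color = (weights[:, :, None] * torch.rand(batch_size, n_samples, 3)).sum(dim=1)  # weighted radiance
```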
import os import time import logging import argparse import numpy as np import cv2 as cv import trimesh import torch import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from shutil import copyfile from tqdm import tqdm from pyhocon import ConfigFactory from models.dataset import Dataset from models.sampler import PointSampler from models.fields import ( RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF, AnisotropyNetwork ) from models.attenuation_coefficient import AttenuationCoefficient from models.renderer import Renderer from models.util import read_mesh, write_mesh, extract_geometry
12,873
logging.info('End') def save_checkpoint(self): checkpoint = { 'sdf_network_fine': self.sdf_network.state_dict(), 'variance_network_fine': self.deviation_network.state_dict(), 'color_network_fine': self.color_network.state_dict(), 'optimizer': self.optimizer.state_dict(), 'iter_step': self.iter_step, } if self.nerf_outside is not None: checkpoint['nerf'] = self.nerf_outside.state_dict() if self.anisotropy_network is not None: checkpoint['anisotropy_network_fine'] = self.anisotropy_network.state_dict() os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True) torch.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step))) def validate_image(self, idx=-1, resolution_level=-1): if idx < 0: idx = np.random.randint(self.dataset.n_images) print('Validate: iter: {}, camera: {}'.format(self.iter_step, idx)) if resolution_level < 0: resolution_level = self.validate_resolution_level rays_o, rays_d = self.dataset.gen_rays_at(idx, resolution_level=resolution_level) H, W, _ = rays_o.shape rays_o = rays_o.reshape(-1, 3).split(self.batch_size) rays_d = rays_d.reshape(-1, 3).split(self.batch_size) out_rgb_fine = [] out_normal_fine = [] out_depth = [] for rays_o_batch, rays_d_batch in zip(rays_o, rays_d): near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch) background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None render_out = self.renderer.render(rays_o_batch, rays_d_batch, near, far, background_rgb=background_rgb, annealed_anisotropy=self.get_annealed_anisotropy()) def feasible(key): return (key in render_out) and (render_out[key] is not None) if feasible('color_fine'): out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy()) if feasible('gradients') and feasible('weights'): n_samples = self.point_sampler.n_total_samples normals = render_out['gradients'] * render_out['weights'][:, :n_samples, None] if feasible('inside_sphere'): normals = normals * render_out['inside_sphere'][..., None] normals = normals.sum(dim=1).detach().cpu().numpy() out_normal_fine.append(normals) if feasible('depth'): out_depth.append(render_out['depth']) del render_out img_fine = None if len(out_rgb_fine) > 0: img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256).clip(0, 255) normal_img = None if len(out_normal_fine) > 0: normal_img = np.concatenate(out_normal_fine, axis=0) rot = np.linalg.inv(self.dataset.pose_all[idx, :3, :3].detach().cpu().numpy()) normal_img = (np.matmul(rot[None, :, :], normal_img[:, :, None]) .reshape([H, W, 3, -1]) * 128 + 128).clip(0, 255) depth_fine = None if len(out_depth) > 0: depth_img = (np.concatenate(out_depth, axis=0).reshape([H, W, 3]) * 256).clip(0, 255).astype(np.uint8) depth_img = cv.applyColorMap(depth_img, cv.COLORMAP_MAGMA) os.makedirs(os.path.join(self.base_exp_dir, 'validations_fine'), exist_ok=True) os.makedirs(os.path.join(self.base_exp_dir, 'normals'), exist_ok=True) os.makedirs(os.path.join(self.base_exp_dir, 'depth'), exist_ok=True) if self.mode == 'render': os.makedirs(os.path.join(self.base_exp_dir, 'renders'), exist_ok=True) for i in range(img_fine.shape[-1]): if len(out_rgb_fine) > 0: if self.mode == 'render': cv.imwrite(os.path.join(self.base_exp_dir, 'renders', 'render_{}.png'.format(idx)), img_fine[..., i]) else: cv.imwrite(os.path.join(self.base_exp_dir, 'validations_fine', '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), np.concatenate([img_fine[..., i], self.dataset.image_at(idx, resolution_level=resolution_level)])) if 
len(out_normal_fine) > 0: cv.imwrite(os.path.join(self.base_exp_dir, 'normals', '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), normal_img[..., i]) if len(out_depth) > 0: cv.imwrite(os.path.join(self.base_exp_dir, 'depth', '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), depth_img) def validate_mesh(self, scale_mesh=True, resolution=64, threshold=0.0): bound_min = torch.tensor(self.dataset.object_bbox_min, dtype=torch.float32) bound_max = torch.tensor(self.dataset.object_bbox_max, dtype=torch.float32) query_func = lambda pts: -self.sdf_network.sdf(pts)
logging.getLogger('matplotlib.font_manager').disabled = True class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False, max_n_training_images=-1): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.dataset = Dataset(self.conf['dataset']) self.iter_step = 0 # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq') self.viz_deviation_freq = self.conf.get_int('train.viz_deviation_freq', 0) self.batch_size = self.conf.get_int('train.batch_size') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.max_n_training_images = max_n_training_images # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Networks params_to_train = [] self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device) self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device) self.color_network = RenderingNetwork(**self.conf['model.rendering_network']).to(self.device) params_to_train += list(self.sdf_network.parameters()) params_to_train += list(self.deviation_network.parameters()) params_to_train += list(self.color_network.parameters()) # optionally initialize a background NeRF network self.nerf_outside = None renders_background = 'model.point_sampler.n_outside' not in self.conf or self.conf['model.point_sampler.n_outside'] > 0 if renders_background: self.nerf_outside = NeRF(**self.conf['model.nerf']).to(self.device) params_to_train += list(self.nerf_outside.parameters()) # optionally initialize a layer to learn a spatially varying anisotropy self.anisotropy_network = None if 'model.anisotropy_network' in self.conf: self.anisotropy_network = AnisotropyNetwork(**self.conf['model.anisotropy_network']).to(self.device) params_to_train += list(self.anisotropy_network.parameters()) self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) self.atten_coeff = AttenuationCoefficient(**self.conf['model.attenuation_coefficient']) self.point_sampler = PointSampler(**self.conf['model.point_sampler']) self.renderer = Renderer(self.nerf_outside, self.sdf_network, self.deviation_network, self.color_network, self.anisotropy_network, self.atten_coeff, self.point_sampler) # Load checkpoint latest_model_name = None if is_continue: model_list_raw = os.listdir(os.path.join(self.base_exp_dir, 'checkpoints')) model_list = [] for model_name in model_list_raw: if model_name[-3:] == 'pth' and 
int(model_name[5:-4]) <= self.end_iter: model_list.append(model_name) model_list.sort() latest_model_name = model_list[-1] if latest_model_name is not None: logging.info('Find checkpoint: {}'.format(latest_model_name)) self.load_checkpoint(latest_model_name) # Backup codes and configs for debug if self.mode[:5] == 'train': self.file_backup() def train(self): self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) self.update_learning_rate() res_step = self.end_iter - self.iter_step image_perm = self.get_image_perm() for iter_i in tqdm(range(res_step)): data = self.dataset.gen_random_rays_at(image_perm[self.iter_step % len(image_perm)], self.batch_size) rays_o, rays_d, true_rgb, mask = data[:, :3], data[:, 3: 6], data[:, 6: 9], data[:, 9: 10] near, far = self.dataset.near_far_from_sphere(rays_o, rays_d) background_rgb = None if self.use_white_bkgd: background_rgb = torch.ones([1, 3]) if self.mask_weight > 0.0: mask = (mask > 0.5).float() else: mask = torch.ones_like(mask) mask_sum = mask.sum() + 1e-5 render_out = self.renderer.render(rays_o, rays_d, near, far, background_rgb=background_rgb, annealed_anisotropy=self.get_annealed_anisotropy()) color_fine = render_out['color_fine'] s_val = render_out['s_val'] gradient_error = render_out['gradient_error'] weight_max = render_out['weight_max'] weight_sum = render_out['weight_sum'] # Loss color_error = (color_fine - true_rgb) * mask color_fine_loss = F.l1_loss(color_error, torch.zeros_like(color_error), reduction='sum') / mask_sum psnr = 20.0 * torch.log10(1.0 / (((color_fine - true_rgb)**2 * mask).sum() / (mask_sum * 3.0)).sqrt()) eikonal_loss = gradient_error mask_loss = F.binary_cross_entropy(weight_sum.clip(1e-3, 1.0 - 1e-3), mask) loss = color_fine_loss +\ eikonal_loss * self.igr_weight +\ mask_loss * self.mask_weight self.optimizer.zero_grad() loss.backward() self.optimizer.step() self.iter_step += 1 self.writer.add_scalar('Loss/loss', loss, self.iter_step) self.writer.add_scalar('Loss/color_loss', color_fine_loss, self.iter_step) self.writer.add_scalar('Loss/eikonal_loss', eikonal_loss, self.iter_step) self.writer.add_scalar('Statistics/s_val', s_val.mean(), self.iter_step) self.writer.add_scalar('Statistics/weight_max', (weight_max * mask).sum() / mask_sum, self.iter_step) self.writer.add_scalar('Statistics/psnr', psnr, self.iter_step) if self.iter_step % self.report_freq == 0: print(self.base_exp_dir) print('iter:{:8>d} loss = {} lr={}'.format(self.iter_step, loss, self.optimizer.param_groups[0]['lr'])) if self.iter_step % self.save_freq == 0: self.save_checkpoint() if self.iter_step % self.val_freq == 0: self.validate_image() if self.iter_step % self.val_mesh_freq == 0: self.validate_mesh() if self.viz_deviation_freq > 0 and self.iter_step % self.viz_deviation_freq == 0: self.visualize_deviation_network() self.update_learning_rate() if self.iter_step % len(image_perm) == 0: image_perm = self.get_image_perm() def get_image_perm(self): if self.max_n_training_images <= 0: return torch.randperm(self.dataset.n_images) else: return torch.randperm(min(self.dataset.n_images, self.max_n_training_images)) def get_annealed_anisotropy(self): # goes from 0 (uniform / isotropic / rough) --> 1 (delta / anisotropic / smooth) if self.anneal_end == 0: return 1.0 else: return np.min([1.0, self.iter_step / self.anneal_end]) def update_learning_rate(self): if self.iter_step < self.warm_up_end: learning_factor = self.iter_step / self.warm_up_end else: alpha = self.learning_rate_alpha progress = (self.iter_step - self.warm_up_end) / 
(self.end_iter - self.warm_up_end) learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha for g in self.optimizer.param_groups: g['lr'] = self.learning_rate * learning_factor def file_backup(self): dir_lis = self.conf['general.recording'] os.makedirs(os.path.join(self.base_exp_dir, 'recording'), exist_ok=True) for dir_name in dir_lis: cur_dir = os.path.join(self.base_exp_dir, 'recording', dir_name) os.makedirs(cur_dir, exist_ok=True) files = os.listdir(dir_name) for f_name in files: if f_name[-3:] == '.py': copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name)) copyfile(self.conf_path, os.path.join(self.base_exp_dir, 'recording', 'config.conf')) def load_checkpoint(self, checkpoint_name): checkpoint = torch.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name), map_location=self.device) self.sdf_network.load_state_dict(checkpoint['sdf_network_fine']) self.color_network.load_state_dict(checkpoint['color_network_fine']) self.deviation_network.load_state_dict(checkpoint['variance_network_fine']) self.optimizer.load_state_dict(checkpoint['optimizer']) self.iter_step = checkpoint['iter_step'] if 'nerf' in checkpoint: self.nerf_outside.state_dict(checkpoint['nerf']) if 'anisotropy_network_fine' in checkpoint: self.anisotropy_network.state_dict(checkpoint['anisotropy_network_fine']) logging.info('End') def save_checkpoint(self): checkpoint = { 'sdf_network_fine': self.sdf_network.state_dict(), 'variance_network_fine': self.deviation_network.state_dict(), 'color_network_fine': self.color_network.state_dict(), 'optimizer': self.optimizer.state_dict(), 'iter_step': self.iter_step, } if self.nerf_outside is not None: checkpoint['nerf'] = self.nerf_outside.state_dict() if self.anisotropy_network is not None: checkpoint['anisotropy_network_fine'] = self.anisotropy_network.state_dict() os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True) torch.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step))) def validate_image(self, idx=-1, resolution_level=-1): if idx < 0: idx = np.random.randint(self.dataset.n_images) print('Validate: iter: {}, camera: {}'.format(self.iter_step, idx)) if resolution_level < 0: resolution_level = self.validate_resolution_level rays_o, rays_d = self.dataset.gen_rays_at(idx, resolution_level=resolution_level) H, W, _ = rays_o.shape rays_o = rays_o.reshape(-1, 3).split(self.batch_size) rays_d = rays_d.reshape(-1, 3).split(self.batch_size) out_rgb_fine = [] out_normal_fine = [] out_depth = [] for rays_o_batch, rays_d_batch in zip(rays_o, rays_d): near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch) background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None render_out = self.renderer.render(rays_o_batch, rays_d_batch, near, far, background_rgb=background_rgb, annealed_anisotropy=self.get_annealed_anisotropy()) def feasible(key): return (key in render_out) and (render_out[key] is not None) if feasible('color_fine'): out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy()) if feasible('gradients') and feasible('weights'): n_samples = self.point_sampler.n_total_samples normals = render_out['gradients'] * render_out['weights'][:, :n_samples, None] if feasible('inside_sphere'): normals = normals * render_out['inside_sphere'][..., None] normals = normals.sum(dim=1).detach().cpu().numpy() out_normal_fine.append(normals) if feasible('depth'): out_depth.append(render_out['depth']) del render_out img_fine = None if 
len(out_rgb_fine) > 0: img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256).clip(0, 255) normal_img = None if len(out_normal_fine) > 0: normal_img = np.concatenate(out_normal_fine, axis=0) rot = np.linalg.inv(self.dataset.pose_all[idx, :3, :3].detach().cpu().numpy()) normal_img = (np.matmul(rot[None, :, :], normal_img[:, :, None]) .reshape([H, W, 3, -1]) * 128 + 128).clip(0, 255) depth_fine = None if len(out_depth) > 0: depth_img = (np.concatenate(out_depth, axis=0).reshape([H, W, 3]) * 256).clip(0, 255).astype(np.uint8) depth_img = cv.applyColorMap(depth_img, cv.COLORMAP_MAGMA) os.makedirs(os.path.join(self.base_exp_dir, 'validations_fine'), exist_ok=True) os.makedirs(os.path.join(self.base_exp_dir, 'normals'), exist_ok=True) os.makedirs(os.path.join(self.base_exp_dir, 'depth'), exist_ok=True) if self.mode == 'render': os.makedirs(os.path.join(self.base_exp_dir, 'renders'), exist_ok=True) for i in range(img_fine.shape[-1]): if len(out_rgb_fine) > 0: if self.mode == 'render': cv.imwrite(os.path.join(self.base_exp_dir, 'renders', 'render_{}.png'.format(idx)), img_fine[..., i]) else: cv.imwrite(os.path.join(self.base_exp_dir, 'validations_fine', '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), np.concatenate([img_fine[..., i], self.dataset.image_at(idx, resolution_level=resolution_level)])) if len(out_normal_fine) > 0: cv.imwrite(os.path.join(self.base_exp_dir, 'normals', '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), normal_img[..., i]) if len(out_depth) > 0: cv.imwrite(os.path.join(self.base_exp_dir, 'depth', '{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)), depth_img) def validate_mesh(self, scale_mesh=True, resolution=64, threshold=0.0): bound_min = torch.tensor(self.dataset.object_bbox_min, dtype=torch.float32) bound_max = torch.tensor(self.dataset.object_bbox_max, dtype=torch.float32) query_func = lambda pts: -self.sdf_network.sdf(pts)
vertices, triangles = extract_geometry(bound_min, bound_max,
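The ground-truth completion above calls this repo's extract_geometry with query_func = -sdf, i.e. the surface is recovered as the zero level set of the negated SDF. The repo's own implementation is not included in this row; the following is only a generic sketch of what such a helper typically does (evaluate the query on a dense grid inside the bounding box, then run marching cubes), assuming PyMCubes (`mcubes`) for the surface extraction and omitting the chunked grid evaluation and device handling a real implementation would need.

import mcubes  # PyMCubes -- an assumption, not necessarily what this repo uses
import torch

def extract_geometry_sketch(bound_min, bound_max, resolution, threshold, query_func):
    # Dense grid of query points inside the bounding box.
    xs = torch.linspace(bound_min[0], bound_max[0], resolution)
    ys = torch.linspace(bound_min[1], bound_max[1], resolution)
    zs = torch.linspace(bound_min[2], bound_max[2], resolution)
    pts = torch.stack(torch.meshgrid(xs, ys, zs, indexing='ij'), dim=-1).reshape(-1, 3)
    with torch.no_grad():
        u = query_func(pts).reshape(resolution, resolution, resolution).cpu().numpy()
    # With query_func = -sdf, the surface is the `threshold` (= 0) level set.
    vertices, triangles = mcubes.marching_cubes(u, threshold)
    # Map vertex coordinates from grid indices back to world space.
    b_min, b_max = bound_min.cpu().numpy(), bound_max.cpu().numpy()
    vertices = vertices / (resolution - 1.0) * (b_max - b_min) + b_min
    return vertices, triangles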
11
2023-11-28 03:13:44+00:00
16k
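Runner.update_learning_rate in the row above combines a linear warmup with a cosine decay toward a floor of learning_rate * alpha. A minimal standalone restatement of that schedule, with warm_up_end, end_iter and alpha passed as plain arguments instead of being read from the config, looks like this:

import numpy as np

def lr_factor(iter_step, warm_up_end, end_iter, alpha):
    # Linear warmup to 1.0, then cosine decay from 1.0 down to alpha,
    # mirroring Runner.update_learning_rate above.
    if iter_step < warm_up_end:
        return iter_step / warm_up_end
    progress = (iter_step - warm_up_end) / (end_iter - warm_up_end)
    return (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha

# applied per optimizer param group: g['lr'] = learning_rate * lr_factor(step, warm_up_end, end_iter, alpha)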
weijiawu/CisDQ
mask2former_video/data_video/ytvis_eval.py
[ { "identifier": "YTVOS", "path": "mask2former_video/data_video/datasets/ytvis_api/ytvos.py", "snippet": "class YTVOS:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset,self.anns,self.cats,self.vids = dict(),dict(),dict(),dict()\n self.vidToAnns, self.catToVids = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()\n\n def createIndex(self):\n # create index\n print('creating index...')\n anns, cats, vids = {}, {}, {}\n vidToAnns,catToVids = defaultdict(list),defaultdict(list)\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n vidToAnns[ann['video_id']].append(ann)\n anns[ann['id']] = ann\n\n if 'videos' in self.dataset:\n for vid in self.dataset['videos']:\n vids[vid['id']] = vid\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n\n if 'annotations' in self.dataset and 'categories' in self.dataset:\n for ann in self.dataset['annotations']:\n catToVids[ann['category_id']].append(ann['video_id'])\n\n print('index created!')\n\n # create class members\n self.anns = anns\n self.vidToAnns = vidToAnns\n self.catToVids = catToVids\n self.vids = vids\n self.cats = cats\n\n def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))\n\n def getAnnIds(self, vidIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param vidIds (int array) : get anns for given vids\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(vidIds) == 0:\n lists = [self.vidToAnns[vidId] for vidId in vidIds if vidId in self.vidToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['avg_area'] > areaRng[0] and ann['avg_area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids\n\n def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n supNms = supNms if _isArrayLike(supNms) else [supNms]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids\n\n def getVidIds(self, vidIds=[], catIds=[]):\n '''\n Get vid ids that satisfy given filter conditions.\n :param vidIds (int array) : get vids for given ids\n :param catIds (int array) : get vids with all given cats\n :return: ids (int array) : integer array of vid ids\n '''\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == 0:\n ids = self.vids.keys()\n else:\n ids = set(vidIds)\n for i, catId in enumerate(catIds):\n if i == 0 and len(ids) == 0:\n ids = set(self.catToVids[catId])\n else:\n ids &= set(self.catToVids[catId])\n return list(ids)\n\n def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]\n\n def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]\n\n def loadVids(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying vid\n :return: vids (object array) : loaded vid objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.vids[id] for id in ids]\n elif type(ids) == int:\n return [self.vids[ids]]\n\n\n def loadRes(self, resFile):\n \"\"\"\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = YTVOS()\n res.dataset['videos'] = [img for img in self.dataset['videos']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsVidIds = [ann['video_id'] for ann in anns]\n assert set(annsVidIds) == (set(annsVidIds) & set(self.getVidIds())), \\\n 'Results do not correspond to current coco set'\n if 'segmentations' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n ann['areas'] = []\n if not 'bboxes' in ann:\n ann['bboxes'] = []\n for seg in ann['segmentations']:\n # now only support compressed RLE format as 
segmentation results\n if seg:\n ann['areas'].append(maskUtils.area(seg))\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(maskUtils.toBbox(seg))\n else:\n ann['areas'].append(None)\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(None)\n ann['id'] = id+1\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n ann['iscrowd'] = 0\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res\n\n def annToRLE(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n \"\"\"\n t = self.vids[ann['video_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentations'][frameId]\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = segm\n return rle\n\n def annToMask(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann, frameId)\n m = maskUtils.decode(rle)\n return m" }, { "identifier": "YTVOSeval", "path": "mask2former_video/data_video/datasets/ytvis_api/ytvoseval.py", "snippet": "class YTVOSeval:\n # Interface for evaluating video instance segmentation on the YouTubeVIS dataset.\n #\n # The usage for YTVOSeval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = YTVOSeval(cocoGt,cocoDt); # initialize YTVOSeval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] 
A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.params = {} # evaluation parameters\n self.evalVids = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.vidIds = sorted(cocoGt.getVidIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n for i, a in enumerate(ann['segmentations']):\n if a:\n rle = coco.annToRLE(ann, i)\n ann['segmentations'][i] = rle\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['video_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['video_id'], dt['category_id']].append(dt)\n self.evalVids = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalVids\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.vidIds = list(np.unique(p.vidIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(vidId, catId): computeIoU(vidId, catId) \\\n for vidId in p.vidIds\n for catId in catIds}\n\n evaluateVid = self.evaluateVid\n maxDet = p.maxDets[-1]\n \n \n self.evalImgs = [evaluateVid(vidId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for vidId in p.vidIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, vidId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentations'] for g in gt]\n d = [d['segmentations'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bboxes'] for g in gt]\n d = [d['bboxes'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n #ious = maskUtils.iou(d,g,iscrowd)\n def iou_seq(d_seq, g_seq):\n i = .0\n u = .0\n for d, g in zip(d_seq, g_seq):\n if d and g:\n i += maskUtils.area(maskUtils.merge([d, g], True))\n u += maskUtils.area(maskUtils.merge([d, g], False))\n elif not d and g:\n u += maskUtils.area(g)\n elif d and not g:\n u += maskUtils.area(d)\n if not u > .0:\n print(\"Mask sizes in video {} and category {} may not match!\".format(vidId, catId))\n iou = i / u if u > .0 else .0\n return iou\n ious = np.zeros([len(d), len(g)])\n for i, j in np.ndindex(ious.shape):\n ious[i, j] = iou_seq(d[i], g[j])\n #print(vidId, catId, ious.shape, ious)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n 
else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['avg_area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateVid(self, vidId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['avg_area']<aRng[0] or g['avg_area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[vidId, catId][:, gtind] if len(self.ious[vidId, catId]) > 0 else self.ious[vidId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['avg_area']<aRng[0] or d['avg_area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'video_id': vidId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = 
-np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.vidIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.vidIds) if i in setI]\n I0 = len(_pe.vidIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == 
p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()" } ]
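The heart of YTVOSeval.computeIoU in the snippet above is the sequence-level mask IoU: intersection and union areas are accumulated over every frame of the two tracks before dividing once. Pulled out of the class, that helper reads as below, where d_seq and g_seq are per-frame COCO RLE masks with empty entries for frames in which the track is absent:

import pycocotools.mask as maskUtils

def iou_seq(d_seq, g_seq):
    # Accumulate per-frame intersection (i) and union (u) areas, then divide once.
    i, u = 0.0, 0.0
    for d, g in zip(d_seq, g_seq):
        if d and g:
            i += maskUtils.area(maskUtils.merge([d, g], True))   # intersect=True
            u += maskUtils.area(maskUtils.merge([d, g], False))  # union
        elif not d and g:
            u += maskUtils.area(g)
        elif d and not g:
            u += maskUtils.area(d)
    return i / u if u > 0.0 else 0.0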
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import torch
import detectron2.utils.comm as comm
from collections import OrderedDict
from .datasets.ytvis_api.ytvos import YTVOS
from .datasets.ytvis_api.ytvoseval import YTVOSeval
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
12,549
Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR1", "AR10"] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format("segm") + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format("segm") + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json_video(inputs, outputs): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): video_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ assert len(inputs) == 1, "More than one inputs are loaded for inference!" video_id = inputs[0]["video_id"] video_length = inputs[0]["length"] scores = outputs["pred_scores"] labels = outputs["pred_labels"] masks = outputs["pred_masks"] ytvis_results = [] for instance_id, (s, l, m) in enumerate(zip(scores, labels, masks)): segms = [ mask_util.encode(np.array(_mask[:, :, None], order="F", dtype="uint8"))[0] for _mask in m ] for rle in segms: rle["counts"] = rle["counts"].decode("utf-8") res = { "video_id": video_id, "score": s, "category_id": l, "segmentations": segms, } ytvis_results.append(res) return ytvis_results def _evaluate_predictions_on_coco( coco_gt, coco_results, img_ids=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results)
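_derive_coco_results in the cropped code computes per-category AP by slicing the accumulated precision array, whose axes are (iou, recall, class, area range, max dets). Isolated from the class, the slice-and-average step is roughly:

import numpy as np

# precisions = coco_eval.eval["precision"], shape (T_iou, R_recall, K_cls, A_area, M_maxdets)
def per_category_ap(precisions, class_idx):
    # area-range index 0 = "all", max-dets index -1 = the largest setting.
    p = precisions[:, :, class_idx, 0, -1]
    p = p[p > -1]          # -1 marks settings with no ground-truth objects
    return float(np.mean(p) * 100) if p.size else float("nan")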
# Copyright (c) Facebook, Inc. and its affiliates. # Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC class YTVISEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, use_fast_impl=True, ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file in torch serialization format that contains all the raw original predictions. 2. "coco_instances_results.json" a json file in COCO's result format. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl if tasks is not None and isinstance(tasks, CfgNode): self._logger.warning( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._ytvis_api = YTVOS(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._ytvis_api.dataset def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ prediction = instances_to_coco_json_video(inputs, outputs) self._predictions.extend(prediction) def evaluate(self): """ Args: img_ids: a list of image IDs to evaluate on. 
Default to None for the whole dataset """ if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() self._eval_predictions(predictions) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _eval_predictions(self, predictions): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for YTVIS format ...") # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in predictions: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: file_path = os.path.join(self._output_dir, "results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(predictions)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return coco_eval = ( _evaluate_predictions_on_coco( self._ytvis_api, predictions, ) if len(predictions) > 0 else None # cocoapi does not handle empty results very well ) res = self._derive_coco_results( coco_eval, class_names=self._metadata.get("thing_classes") ) self._results["segm"] = res def _derive_coco_results(self, coco_eval, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. 
Returns: a dict of {metric name: score} """ metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR1", "AR10"] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format("segm") + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format("segm") + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json_video(inputs, outputs): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): video_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ assert len(inputs) == 1, "More than one inputs are loaded for inference!" video_id = inputs[0]["video_id"] video_length = inputs[0]["length"] scores = outputs["pred_scores"] labels = outputs["pred_labels"] masks = outputs["pred_masks"] ytvis_results = [] for instance_id, (s, l, m) in enumerate(zip(scores, labels, masks)): segms = [ mask_util.encode(np.array(_mask[:, :, None], order="F", dtype="uint8"))[0] for _mask in m ] for rle in segms: rle["counts"] = rle["counts"].decode("utf-8") res = { "video_id": video_id, "score": s, "category_id": l, "segmentations": segms, } ytvis_results.append(res) return ytvis_results def _evaluate_predictions_on_coco( coco_gt, coco_results, img_ids=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results)
coco_eval = YTVOSeval(coco_gt, coco_dt)
1
2023-11-28 10:33:40+00:00
16k
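For this row the ground-truth next line is `coco_eval = YTVOSeval(coco_gt, coco_dt)`. The usage pattern spelled out in the YTVOSeval header comments suggests that _evaluate_predictions_on_coco continues roughly as follows; this is a sketch only, assuming `ytvis_api` is the ground-truth YTVOS object and `predictions` is the list built by instances_to_coco_json_video.

coco_gt = ytvis_api                      # ground-truth YTVOS API (assumed name)
coco_dt = coco_gt.loadRes(predictions)   # wrap the model's results
coco_eval = YTVOSeval(coco_gt, coco_dt)  # the row's ground-truth completion
coco_eval.evaluate()                     # per-video, per-category matching
coco_eval.accumulate()                   # fill the precision / recall tables
coco_eval.summarize()                    # prints AP / AR; values land in coco_eval.stats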
aliyun/pai-python-sdk
pai/api/training_job.py
[ { "identifier": "PaginatedResult", "path": "pai/api/base.py", "snippet": "class PaginatedResult(object):\n \"\"\"A class represent response of a pagination call to PAI service.\"\"\"\n\n items: List[Union[Dict[str, Any], str]] = None\n total_count: int = None\n\n def __init__(self, items: List[Union[Dict[str, Any], str]], total_count: int):\n self.items = items\n self.total_count = total_count" }, { "identifier": "ServiceName", "path": "pai/api/base.py", "snippet": "class ServiceName(object):\n # Service provided by PAI.\n PAI_DLC = \"pai-dlc\"\n PAI_EAS = \"pai-eas\"\n PAI_WORKSPACE = \"aiworkspace\"\n PAI_STUDIO = \"pai\"\n PAIFLOW = \"paiflow\"\n # Other services provided by Alibaba Cloud.\n STS = \"sts\"" }, { "identifier": "WorkspaceScopedResourceAPI", "path": "pai/api/base.py", "snippet": "class WorkspaceScopedResourceAPI(with_metaclass(ABCMeta, ResourceAPI)):\n \"\"\"Workspace Scoped Resource API.\"\"\"\n\n # A workspace_id placeholder indicate the workspace_id field of\n # the request should not be replaced.\n workspace_id_none_placeholder = \"WORKSPACE_ID_NONE_PLACEHOLDER\"\n\n # Default parameter name for request object.\n default_param_name_for_request = \"request\"\n\n def __init__(self, workspace_id, acs_client, **kwargs):\n super(WorkspaceScopedResourceAPI, self).__init__(\n acs_client=acs_client, **kwargs\n )\n self.workspace_id = workspace_id\n\n def _do_request(self, method_, **kwargs):\n request = kwargs.get(self.default_param_name_for_request)\n\n if not request:\n # Sometimes, request object is not named as \"request\", we need to find it.\n for param_name, param_value in kwargs.items():\n if isinstance(param_value, TeaModel) and type(\n param_value\n ).__name__.endswith(\"Request\"):\n request = param_value\n break\n\n # Automatically configure the workspace ID for the request\n if request and hasattr(request, \"workspace_id\"):\n if request.workspace_id is None:\n request.workspace_id = self.workspace_id\n elif (\n request.workspace_id == self.workspace_id_none_placeholder\n or not request.workspace_id\n ):\n # request.workspace_id is 0 or request.workspace_id is empty string,\n # we do not inject workspace_id of the scope.\n request.workspace_id = None\n return super(WorkspaceScopedResourceAPI, self)._do_request(method_, **kwargs)" }, { "identifier": "AlgorithmSpec", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class AlgorithmSpec(TeaModel):\n def __init__(\n self,\n code_dir: Location = None,\n command: List[str] = None,\n compute_resource: AlgorithmSpecComputeResource = None,\n customization: AlgorithmSpecCustomization = None,\n hyper_parameters: List[HyperParameterDefinition] = None,\n image: str = None,\n input_channels: List[Channel] = None,\n job_type: str = None,\n metric_definitions: List[MetricDefinition] = None,\n output_channels: List[Channel] = None,\n progress_definitions: AlgorithmSpecProgressDefinitions = None,\n resource_requirements: List[ConditionExpression] = None,\n supported_instance_types: List[str] = None,\n supports_distributed_training: bool = None,\n ):\n self.code_dir = code_dir\n self.command = command\n self.compute_resource = compute_resource\n self.customization = customization\n self.hyper_parameters = hyper_parameters\n self.image = image\n self.input_channels = input_channels\n self.job_type = job_type\n self.metric_definitions = metric_definitions\n self.output_channels = output_channels\n self.progress_definitions = progress_definitions\n self.resource_requirements = resource_requirements\n 
self.supported_instance_types = supported_instance_types\n self.supports_distributed_training = supports_distributed_training\n\n def validate(self):\n if self.code_dir:\n self.code_dir.validate()\n if self.compute_resource:\n self.compute_resource.validate()\n if self.customization:\n self.customization.validate()\n if self.hyper_parameters:\n for k in self.hyper_parameters:\n if k:\n k.validate()\n if self.input_channels:\n for k in self.input_channels:\n if k:\n k.validate()\n if self.metric_definitions:\n for k in self.metric_definitions:\n if k:\n k.validate()\n if self.output_channels:\n for k in self.output_channels:\n if k:\n k.validate()\n if self.progress_definitions:\n self.progress_definitions.validate()\n if self.resource_requirements:\n for k in self.resource_requirements:\n if k:\n k.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.code_dir is not None:\n result['CodeDir'] = self.code_dir.to_map()\n if self.command is not None:\n result['Command'] = self.command\n if self.compute_resource is not None:\n result['ComputeResource'] = self.compute_resource.to_map()\n if self.customization is not None:\n result['Customization'] = self.customization.to_map()\n result['HyperParameters'] = []\n if self.hyper_parameters is not None:\n for k in self.hyper_parameters:\n result['HyperParameters'].append(k.to_map() if k else None)\n if self.image is not None:\n result['Image'] = self.image\n result['InputChannels'] = []\n if self.input_channels is not None:\n for k in self.input_channels:\n result['InputChannels'].append(k.to_map() if k else None)\n if self.job_type is not None:\n result['JobType'] = self.job_type\n result['MetricDefinitions'] = []\n if self.metric_definitions is not None:\n for k in self.metric_definitions:\n result['MetricDefinitions'].append(k.to_map() if k else None)\n result['OutputChannels'] = []\n if self.output_channels is not None:\n for k in self.output_channels:\n result['OutputChannels'].append(k.to_map() if k else None)\n if self.progress_definitions is not None:\n result['ProgressDefinitions'] = self.progress_definitions.to_map()\n result['ResourceRequirements'] = []\n if self.resource_requirements is not None:\n for k in self.resource_requirements:\n result['ResourceRequirements'].append(k.to_map() if k else None)\n if self.supported_instance_types is not None:\n result['SupportedInstanceTypes'] = self.supported_instance_types\n if self.supports_distributed_training is not None:\n result['SupportsDistributedTraining'] = self.supports_distributed_training\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('CodeDir') is not None:\n temp_model = Location()\n self.code_dir = temp_model.from_map(m['CodeDir'])\n if m.get('Command') is not None:\n self.command = m.get('Command')\n if m.get('ComputeResource') is not None:\n temp_model = AlgorithmSpecComputeResource()\n self.compute_resource = temp_model.from_map(m['ComputeResource'])\n if m.get('Customization') is not None:\n temp_model = AlgorithmSpecCustomization()\n self.customization = temp_model.from_map(m['Customization'])\n self.hyper_parameters = []\n if m.get('HyperParameters') is not None:\n for k in m.get('HyperParameters'):\n temp_model = HyperParameterDefinition()\n self.hyper_parameters.append(temp_model.from_map(k))\n if m.get('Image') is not None:\n self.image = m.get('Image')\n self.input_channels = []\n if m.get('InputChannels') is not None:\n for k in m.get('InputChannels'):\n 
temp_model = Channel()\n self.input_channels.append(temp_model.from_map(k))\n if m.get('JobType') is not None:\n self.job_type = m.get('JobType')\n self.metric_definitions = []\n if m.get('MetricDefinitions') is not None:\n for k in m.get('MetricDefinitions'):\n temp_model = MetricDefinition()\n self.metric_definitions.append(temp_model.from_map(k))\n self.output_channels = []\n if m.get('OutputChannels') is not None:\n for k in m.get('OutputChannels'):\n temp_model = Channel()\n self.output_channels.append(temp_model.from_map(k))\n if m.get('ProgressDefinitions') is not None:\n temp_model = AlgorithmSpecProgressDefinitions()\n self.progress_definitions = temp_model.from_map(m['ProgressDefinitions'])\n self.resource_requirements = []\n if m.get('ResourceRequirements') is not None:\n for k in m.get('ResourceRequirements'):\n temp_model = ConditionExpression()\n self.resource_requirements.append(temp_model.from_map(k))\n if m.get('SupportedInstanceTypes') is not None:\n self.supported_instance_types = m.get('SupportedInstanceTypes')\n if m.get('SupportsDistributedTraining') is not None:\n self.supports_distributed_training = m.get('SupportsDistributedTraining')\n return self" }, { "identifier": "CreateTrainingJobRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequest(TeaModel):\n def __init__(\n self,\n algorithm_name: str = None,\n algorithm_provider: str = None,\n algorithm_spec: AlgorithmSpec = None,\n algorithm_version: str = None,\n code_dir: Location = None,\n compute_resource: CreateTrainingJobRequestComputeResource = None,\n hyper_parameters: List[CreateTrainingJobRequestHyperParameters] = None,\n input_channels: List[CreateTrainingJobRequestInputChannels] = None,\n labels: List[CreateTrainingJobRequestLabels] = None,\n output_channels: List[CreateTrainingJobRequestOutputChannels] = None,\n role_arn: str = None,\n scheduler: CreateTrainingJobRequestScheduler = None,\n training_job_description: str = None,\n training_job_name: str = None,\n user_vpc: CreateTrainingJobRequestUserVpc = None,\n workspace_id: str = None,\n ):\n self.algorithm_name = algorithm_name\n self.algorithm_provider = algorithm_provider\n self.algorithm_spec = algorithm_spec\n self.algorithm_version = algorithm_version\n self.code_dir = code_dir\n self.compute_resource = compute_resource\n self.hyper_parameters = hyper_parameters\n self.input_channels = input_channels\n self.labels = labels\n self.output_channels = output_channels\n self.role_arn = role_arn\n self.scheduler = scheduler\n self.training_job_description = training_job_description\n self.training_job_name = training_job_name\n self.user_vpc = user_vpc\n self.workspace_id = workspace_id\n\n def validate(self):\n if self.algorithm_spec:\n self.algorithm_spec.validate()\n if self.code_dir:\n self.code_dir.validate()\n if self.compute_resource:\n self.compute_resource.validate()\n if self.hyper_parameters:\n for k in self.hyper_parameters:\n if k:\n k.validate()\n if self.input_channels:\n for k in self.input_channels:\n if k:\n k.validate()\n if self.labels:\n for k in self.labels:\n if k:\n k.validate()\n if self.output_channels:\n for k in self.output_channels:\n if k:\n k.validate()\n if self.scheduler:\n self.scheduler.validate()\n if self.user_vpc:\n self.user_vpc.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.algorithm_name is not None:\n result['AlgorithmName'] = self.algorithm_name\n if self.algorithm_provider is 
not None:\n result['AlgorithmProvider'] = self.algorithm_provider\n if self.algorithm_spec is not None:\n result['AlgorithmSpec'] = self.algorithm_spec.to_map()\n if self.algorithm_version is not None:\n result['AlgorithmVersion'] = self.algorithm_version\n if self.code_dir is not None:\n result['CodeDir'] = self.code_dir.to_map()\n if self.compute_resource is not None:\n result['ComputeResource'] = self.compute_resource.to_map()\n result['HyperParameters'] = []\n if self.hyper_parameters is not None:\n for k in self.hyper_parameters:\n result['HyperParameters'].append(k.to_map() if k else None)\n result['InputChannels'] = []\n if self.input_channels is not None:\n for k in self.input_channels:\n result['InputChannels'].append(k.to_map() if k else None)\n result['Labels'] = []\n if self.labels is not None:\n for k in self.labels:\n result['Labels'].append(k.to_map() if k else None)\n result['OutputChannels'] = []\n if self.output_channels is not None:\n for k in self.output_channels:\n result['OutputChannels'].append(k.to_map() if k else None)\n if self.role_arn is not None:\n result['RoleArn'] = self.role_arn\n if self.scheduler is not None:\n result['Scheduler'] = self.scheduler.to_map()\n if self.training_job_description is not None:\n result['TrainingJobDescription'] = self.training_job_description\n if self.training_job_name is not None:\n result['TrainingJobName'] = self.training_job_name\n if self.user_vpc is not None:\n result['UserVpc'] = self.user_vpc.to_map()\n if self.workspace_id is not None:\n result['WorkspaceId'] = self.workspace_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('AlgorithmName') is not None:\n self.algorithm_name = m.get('AlgorithmName')\n if m.get('AlgorithmProvider') is not None:\n self.algorithm_provider = m.get('AlgorithmProvider')\n if m.get('AlgorithmSpec') is not None:\n temp_model = AlgorithmSpec()\n self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec'])\n if m.get('AlgorithmVersion') is not None:\n self.algorithm_version = m.get('AlgorithmVersion')\n if m.get('CodeDir') is not None:\n temp_model = Location()\n self.code_dir = temp_model.from_map(m['CodeDir'])\n if m.get('ComputeResource') is not None:\n temp_model = CreateTrainingJobRequestComputeResource()\n self.compute_resource = temp_model.from_map(m['ComputeResource'])\n self.hyper_parameters = []\n if m.get('HyperParameters') is not None:\n for k in m.get('HyperParameters'):\n temp_model = CreateTrainingJobRequestHyperParameters()\n self.hyper_parameters.append(temp_model.from_map(k))\n self.input_channels = []\n if m.get('InputChannels') is not None:\n for k in m.get('InputChannels'):\n temp_model = CreateTrainingJobRequestInputChannels()\n self.input_channels.append(temp_model.from_map(k))\n self.labels = []\n if m.get('Labels') is not None:\n for k in m.get('Labels'):\n temp_model = CreateTrainingJobRequestLabels()\n self.labels.append(temp_model.from_map(k))\n self.output_channels = []\n if m.get('OutputChannels') is not None:\n for k in m.get('OutputChannels'):\n temp_model = CreateTrainingJobRequestOutputChannels()\n self.output_channels.append(temp_model.from_map(k))\n if m.get('RoleArn') is not None:\n self.role_arn = m.get('RoleArn')\n if m.get('Scheduler') is not None:\n temp_model = CreateTrainingJobRequestScheduler()\n self.scheduler = temp_model.from_map(m['Scheduler'])\n if m.get('TrainingJobDescription') is not None:\n self.training_job_description = m.get('TrainingJobDescription')\n if m.get('TrainingJobName') is not None:\n 
self.training_job_name = m.get('TrainingJobName')\n if m.get('UserVpc') is not None:\n temp_model = CreateTrainingJobRequestUserVpc()\n self.user_vpc = temp_model.from_map(m['UserVpc'])\n if m.get('WorkspaceId') is not None:\n self.workspace_id = m.get('WorkspaceId')\n return self" }, { "identifier": "CreateTrainingJobRequestComputeResource", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestComputeResource(TeaModel):\n def __init__(\n self,\n ecs_count: int = None,\n ecs_spec: str = None,\n instance_count: int = None,\n instance_spec: CreateTrainingJobRequestComputeResourceInstanceSpec = None,\n resource_id: str = None,\n ):\n self.ecs_count = ecs_count\n self.ecs_spec = ecs_spec\n self.instance_count = instance_count\n self.instance_spec = instance_spec\n self.resource_id = resource_id\n\n def validate(self):\n if self.instance_spec:\n self.instance_spec.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.ecs_count is not None:\n result['EcsCount'] = self.ecs_count\n if self.ecs_spec is not None:\n result['EcsSpec'] = self.ecs_spec\n if self.instance_count is not None:\n result['InstanceCount'] = self.instance_count\n if self.instance_spec is not None:\n result['InstanceSpec'] = self.instance_spec.to_map()\n if self.resource_id is not None:\n result['ResourceId'] = self.resource_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('EcsCount') is not None:\n self.ecs_count = m.get('EcsCount')\n if m.get('EcsSpec') is not None:\n self.ecs_spec = m.get('EcsSpec')\n if m.get('InstanceCount') is not None:\n self.instance_count = m.get('InstanceCount')\n if m.get('InstanceSpec') is not None:\n temp_model = CreateTrainingJobRequestComputeResourceInstanceSpec()\n self.instance_spec = temp_model.from_map(m['InstanceSpec'])\n if m.get('ResourceId') is not None:\n self.resource_id = m.get('ResourceId')\n return self" }, { "identifier": "CreateTrainingJobRequestHyperParameters", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestHyperParameters(TeaModel):\n def __init__(\n self,\n name: str = None,\n value: str = None,\n ):\n self.name = name\n self.value = value\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.name is not None:\n result['Name'] = self.name\n if self.value is not None:\n result['Value'] = self.value\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Name') is not None:\n self.name = m.get('Name')\n if m.get('Value') is not None:\n self.value = m.get('Value')\n return self" }, { "identifier": "CreateTrainingJobRequestInputChannels", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestInputChannels(TeaModel):\n def __init__(\n self,\n dataset_id: str = None,\n input_uri: str = None,\n name: str = None,\n ):\n self.dataset_id = dataset_id\n self.input_uri = input_uri\n self.name = name\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.dataset_id is not None:\n result['DatasetId'] = self.dataset_id\n if self.input_uri is not None:\n result['InputUri'] = self.input_uri\n if self.name is not None:\n result['Name'] = self.name\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if 
m.get('DatasetId') is not None:\n self.dataset_id = m.get('DatasetId')\n if m.get('InputUri') is not None:\n self.input_uri = m.get('InputUri')\n if m.get('Name') is not None:\n self.name = m.get('Name')\n return self" }, { "identifier": "CreateTrainingJobRequestLabels", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestLabels(TeaModel):\n def __init__(\n self,\n key: str = None,\n value: str = None,\n ):\n self.key = key\n self.value = value\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.key is not None:\n result['Key'] = self.key\n if self.value is not None:\n result['Value'] = self.value\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Key') is not None:\n self.key = m.get('Key')\n if m.get('Value') is not None:\n self.value = m.get('Value')\n return self" }, { "identifier": "CreateTrainingJobRequestOutputChannels", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestOutputChannels(TeaModel):\n def __init__(\n self,\n dataset_id: str = None,\n name: str = None,\n output_uri: str = None,\n ):\n self.dataset_id = dataset_id\n self.name = name\n self.output_uri = output_uri\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.dataset_id is not None:\n result['DatasetId'] = self.dataset_id\n if self.name is not None:\n result['Name'] = self.name\n if self.output_uri is not None:\n result['OutputUri'] = self.output_uri\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('DatasetId') is not None:\n self.dataset_id = m.get('DatasetId')\n if m.get('Name') is not None:\n self.name = m.get('Name')\n if m.get('OutputUri') is not None:\n self.output_uri = m.get('OutputUri')\n return self" }, { "identifier": "CreateTrainingJobRequestScheduler", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestScheduler(TeaModel):\n def __init__(\n self,\n max_running_time_in_seconds: int = None,\n ):\n self.max_running_time_in_seconds = max_running_time_in_seconds\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.max_running_time_in_seconds is not None:\n result['MaxRunningTimeInSeconds'] = self.max_running_time_in_seconds\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('MaxRunningTimeInSeconds') is not None:\n self.max_running_time_in_seconds = m.get('MaxRunningTimeInSeconds')\n return self" }, { "identifier": "CreateTrainingJobRequestUserVpc", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobRequestUserVpc(TeaModel):\n def __init__(\n self,\n extended_cidrs: List[str] = None,\n security_group_id: str = None,\n switch_id: str = None,\n vpc_id: str = None,\n ):\n self.extended_cidrs = extended_cidrs\n self.security_group_id = security_group_id\n self.switch_id = switch_id\n self.vpc_id = vpc_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.extended_cidrs is not None:\n result['ExtendedCIDRs'] = self.extended_cidrs\n if self.security_group_id is not None:\n result['SecurityGroupId'] = self.security_group_id\n if self.switch_id is 
not None:\n result['SwitchId'] = self.switch_id\n if self.vpc_id is not None:\n result['VpcId'] = self.vpc_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('ExtendedCIDRs') is not None:\n self.extended_cidrs = m.get('ExtendedCIDRs')\n if m.get('SecurityGroupId') is not None:\n self.security_group_id = m.get('SecurityGroupId')\n if m.get('SwitchId') is not None:\n self.switch_id = m.get('SwitchId')\n if m.get('VpcId') is not None:\n self.vpc_id = m.get('VpcId')\n return self" }, { "identifier": "CreateTrainingJobResponseBody", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class CreateTrainingJobResponseBody(TeaModel):\n def __init__(\n self,\n request_id: str = None,\n training_job_id: str = None,\n ):\n self.request_id = request_id\n self.training_job_id = training_job_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.request_id is not None:\n result['RequestId'] = self.request_id\n if self.training_job_id is not None:\n result['TrainingJobId'] = self.training_job_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('RequestId') is not None:\n self.request_id = m.get('RequestId')\n if m.get('TrainingJobId') is not None:\n self.training_job_id = m.get('TrainingJobId')\n return self" }, { "identifier": "GetTrainingJobRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class GetTrainingJobRequest(TeaModel):\n def __init__(\n self,\n token: str = None,\n ):\n self.token = token\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.token is not None:\n result['Token'] = self.token\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Token') is not None:\n self.token = m.get('Token')\n return self" }, { "identifier": "GetTrainingJobResponseBody", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class GetTrainingJobResponseBody(TeaModel):\n def __init__(\n self,\n algorithm_id: str = None,\n algorithm_name: str = None,\n algorithm_provider: str = None,\n algorithm_spec: AlgorithmSpec = None,\n algorithm_version: str = None,\n compute_resource: GetTrainingJobResponseBodyComputeResource = None,\n gmt_create_time: str = None,\n gmt_modified_time: str = None,\n hyper_parameters: List[GetTrainingJobResponseBodyHyperParameters] = None,\n input_channels: List[GetTrainingJobResponseBodyInputChannels] = None,\n instances: List[GetTrainingJobResponseBodyInstances] = None,\n is_temp_algo: bool = None,\n labels: List[GetTrainingJobResponseBodyLabels] = None,\n latest_metrics: List[GetTrainingJobResponseBodyLatestMetrics] = None,\n latest_progress: GetTrainingJobResponseBodyLatestProgress = None,\n output_channels: List[GetTrainingJobResponseBodyOutputChannels] = None,\n reason_code: str = None,\n reason_message: str = None,\n request_id: str = None,\n role_arn: str = None,\n scheduler: GetTrainingJobResponseBodyScheduler = None,\n status: str = None,\n status_transitions: List[GetTrainingJobResponseBodyStatusTransitions] = None,\n training_job_description: str = None,\n training_job_id: str = None,\n training_job_name: str = None,\n training_job_url: str = None,\n user_id: str = None,\n user_vpc: GetTrainingJobResponseBodyUserVpc = None,\n workspace_id: str = None,\n ):\n self.algorithm_id = algorithm_id\n self.algorithm_name = 
algorithm_name\n self.algorithm_provider = algorithm_provider\n self.algorithm_spec = algorithm_spec\n self.algorithm_version = algorithm_version\n self.compute_resource = compute_resource\n self.gmt_create_time = gmt_create_time\n self.gmt_modified_time = gmt_modified_time\n self.hyper_parameters = hyper_parameters\n self.input_channels = input_channels\n self.instances = instances\n self.is_temp_algo = is_temp_algo\n self.labels = labels\n self.latest_metrics = latest_metrics\n self.latest_progress = latest_progress\n self.output_channels = output_channels\n self.reason_code = reason_code\n self.reason_message = reason_message\n self.request_id = request_id\n self.role_arn = role_arn\n self.scheduler = scheduler\n self.status = status\n self.status_transitions = status_transitions\n self.training_job_description = training_job_description\n self.training_job_id = training_job_id\n self.training_job_name = training_job_name\n self.training_job_url = training_job_url\n self.user_id = user_id\n self.user_vpc = user_vpc\n self.workspace_id = workspace_id\n\n def validate(self):\n if self.algorithm_spec:\n self.algorithm_spec.validate()\n if self.compute_resource:\n self.compute_resource.validate()\n if self.hyper_parameters:\n for k in self.hyper_parameters:\n if k:\n k.validate()\n if self.input_channels:\n for k in self.input_channels:\n if k:\n k.validate()\n if self.instances:\n for k in self.instances:\n if k:\n k.validate()\n if self.labels:\n for k in self.labels:\n if k:\n k.validate()\n if self.latest_metrics:\n for k in self.latest_metrics:\n if k:\n k.validate()\n if self.latest_progress:\n self.latest_progress.validate()\n if self.output_channels:\n for k in self.output_channels:\n if k:\n k.validate()\n if self.scheduler:\n self.scheduler.validate()\n if self.status_transitions:\n for k in self.status_transitions:\n if k:\n k.validate()\n if self.user_vpc:\n self.user_vpc.validate()\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.algorithm_id is not None:\n result['AlgorithmId'] = self.algorithm_id\n if self.algorithm_name is not None:\n result['AlgorithmName'] = self.algorithm_name\n if self.algorithm_provider is not None:\n result['AlgorithmProvider'] = self.algorithm_provider\n if self.algorithm_spec is not None:\n result['AlgorithmSpec'] = self.algorithm_spec.to_map()\n if self.algorithm_version is not None:\n result['AlgorithmVersion'] = self.algorithm_version\n if self.compute_resource is not None:\n result['ComputeResource'] = self.compute_resource.to_map()\n if self.gmt_create_time is not None:\n result['GmtCreateTime'] = self.gmt_create_time\n if self.gmt_modified_time is not None:\n result['GmtModifiedTime'] = self.gmt_modified_time\n result['HyperParameters'] = []\n if self.hyper_parameters is not None:\n for k in self.hyper_parameters:\n result['HyperParameters'].append(k.to_map() if k else None)\n result['InputChannels'] = []\n if self.input_channels is not None:\n for k in self.input_channels:\n result['InputChannels'].append(k.to_map() if k else None)\n result['Instances'] = []\n if self.instances is not None:\n for k in self.instances:\n result['Instances'].append(k.to_map() if k else None)\n if self.is_temp_algo is not None:\n result['IsTempAlgo'] = self.is_temp_algo\n result['Labels'] = []\n if self.labels is not None:\n for k in self.labels:\n result['Labels'].append(k.to_map() if k else None)\n result['LatestMetrics'] = []\n if self.latest_metrics is not None:\n for k in self.latest_metrics:\n 
result['LatestMetrics'].append(k.to_map() if k else None)\n if self.latest_progress is not None:\n result['LatestProgress'] = self.latest_progress.to_map()\n result['OutputChannels'] = []\n if self.output_channels is not None:\n for k in self.output_channels:\n result['OutputChannels'].append(k.to_map() if k else None)\n if self.reason_code is not None:\n result['ReasonCode'] = self.reason_code\n if self.reason_message is not None:\n result['ReasonMessage'] = self.reason_message\n if self.request_id is not None:\n result['RequestId'] = self.request_id\n if self.role_arn is not None:\n result['RoleArn'] = self.role_arn\n if self.scheduler is not None:\n result['Scheduler'] = self.scheduler.to_map()\n if self.status is not None:\n result['Status'] = self.status\n result['StatusTransitions'] = []\n if self.status_transitions is not None:\n for k in self.status_transitions:\n result['StatusTransitions'].append(k.to_map() if k else None)\n if self.training_job_description is not None:\n result['TrainingJobDescription'] = self.training_job_description\n if self.training_job_id is not None:\n result['TrainingJobId'] = self.training_job_id\n if self.training_job_name is not None:\n result['TrainingJobName'] = self.training_job_name\n if self.training_job_url is not None:\n result['TrainingJobUrl'] = self.training_job_url\n if self.user_id is not None:\n result['UserId'] = self.user_id\n if self.user_vpc is not None:\n result['UserVpc'] = self.user_vpc.to_map()\n if self.workspace_id is not None:\n result['WorkspaceId'] = self.workspace_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('AlgorithmId') is not None:\n self.algorithm_id = m.get('AlgorithmId')\n if m.get('AlgorithmName') is not None:\n self.algorithm_name = m.get('AlgorithmName')\n if m.get('AlgorithmProvider') is not None:\n self.algorithm_provider = m.get('AlgorithmProvider')\n if m.get('AlgorithmSpec') is not None:\n temp_model = AlgorithmSpec()\n self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec'])\n if m.get('AlgorithmVersion') is not None:\n self.algorithm_version = m.get('AlgorithmVersion')\n if m.get('ComputeResource') is not None:\n temp_model = GetTrainingJobResponseBodyComputeResource()\n self.compute_resource = temp_model.from_map(m['ComputeResource'])\n if m.get('GmtCreateTime') is not None:\n self.gmt_create_time = m.get('GmtCreateTime')\n if m.get('GmtModifiedTime') is not None:\n self.gmt_modified_time = m.get('GmtModifiedTime')\n self.hyper_parameters = []\n if m.get('HyperParameters') is not None:\n for k in m.get('HyperParameters'):\n temp_model = GetTrainingJobResponseBodyHyperParameters()\n self.hyper_parameters.append(temp_model.from_map(k))\n self.input_channels = []\n if m.get('InputChannels') is not None:\n for k in m.get('InputChannels'):\n temp_model = GetTrainingJobResponseBodyInputChannels()\n self.input_channels.append(temp_model.from_map(k))\n self.instances = []\n if m.get('Instances') is not None:\n for k in m.get('Instances'):\n temp_model = GetTrainingJobResponseBodyInstances()\n self.instances.append(temp_model.from_map(k))\n if m.get('IsTempAlgo') is not None:\n self.is_temp_algo = m.get('IsTempAlgo')\n self.labels = []\n if m.get('Labels') is not None:\n for k in m.get('Labels'):\n temp_model = GetTrainingJobResponseBodyLabels()\n self.labels.append(temp_model.from_map(k))\n self.latest_metrics = []\n if m.get('LatestMetrics') is not None:\n for k in m.get('LatestMetrics'):\n temp_model = GetTrainingJobResponseBodyLatestMetrics()\n 
self.latest_metrics.append(temp_model.from_map(k))\n if m.get('LatestProgress') is not None:\n temp_model = GetTrainingJobResponseBodyLatestProgress()\n self.latest_progress = temp_model.from_map(m['LatestProgress'])\n self.output_channels = []\n if m.get('OutputChannels') is not None:\n for k in m.get('OutputChannels'):\n temp_model = GetTrainingJobResponseBodyOutputChannels()\n self.output_channels.append(temp_model.from_map(k))\n if m.get('ReasonCode') is not None:\n self.reason_code = m.get('ReasonCode')\n if m.get('ReasonMessage') is not None:\n self.reason_message = m.get('ReasonMessage')\n if m.get('RequestId') is not None:\n self.request_id = m.get('RequestId')\n if m.get('RoleArn') is not None:\n self.role_arn = m.get('RoleArn')\n if m.get('Scheduler') is not None:\n temp_model = GetTrainingJobResponseBodyScheduler()\n self.scheduler = temp_model.from_map(m['Scheduler'])\n if m.get('Status') is not None:\n self.status = m.get('Status')\n self.status_transitions = []\n if m.get('StatusTransitions') is not None:\n for k in m.get('StatusTransitions'):\n temp_model = GetTrainingJobResponseBodyStatusTransitions()\n self.status_transitions.append(temp_model.from_map(k))\n if m.get('TrainingJobDescription') is not None:\n self.training_job_description = m.get('TrainingJobDescription')\n if m.get('TrainingJobId') is not None:\n self.training_job_id = m.get('TrainingJobId')\n if m.get('TrainingJobName') is not None:\n self.training_job_name = m.get('TrainingJobName')\n if m.get('TrainingJobUrl') is not None:\n self.training_job_url = m.get('TrainingJobUrl')\n if m.get('UserId') is not None:\n self.user_id = m.get('UserId')\n if m.get('UserVpc') is not None:\n temp_model = GetTrainingJobResponseBodyUserVpc()\n self.user_vpc = temp_model.from_map(m['UserVpc'])\n if m.get('WorkspaceId') is not None:\n self.workspace_id = m.get('WorkspaceId')\n return self" }, { "identifier": "ListTrainingJobLogsRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class ListTrainingJobLogsRequest(TeaModel):\n def __init__(\n self,\n end_time: str = None,\n page_number: int = None,\n page_size: int = None,\n start_time: str = None,\n token: str = None,\n worker_id: str = None,\n ):\n self.end_time = end_time\n self.page_number = page_number\n self.page_size = page_size\n self.start_time = start_time\n self.token = token\n self.worker_id = worker_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.end_time is not None:\n result['EndTime'] = self.end_time\n if self.page_number is not None:\n result['PageNumber'] = self.page_number\n if self.page_size is not None:\n result['PageSize'] = self.page_size\n if self.start_time is not None:\n result['StartTime'] = self.start_time\n if self.token is not None:\n result['Token'] = self.token\n if self.worker_id is not None:\n result['WorkerId'] = self.worker_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('EndTime') is not None:\n self.end_time = m.get('EndTime')\n if m.get('PageNumber') is not None:\n self.page_number = m.get('PageNumber')\n if m.get('PageSize') is not None:\n self.page_size = m.get('PageSize')\n if m.get('StartTime') is not None:\n self.start_time = m.get('StartTime')\n if m.get('Token') is not None:\n self.token = m.get('Token')\n if m.get('WorkerId') is not None:\n self.worker_id = m.get('WorkerId')\n return self" }, { "identifier": "ListTrainingJobLogsResponseBody", "path": 
"pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class ListTrainingJobLogsResponseBody(TeaModel):\n def __init__(\n self,\n logs: List[str] = None,\n request_id: str = None,\n total_count: str = None,\n ):\n self.logs = logs\n self.request_id = request_id\n self.total_count = total_count\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.logs is not None:\n result['Logs'] = self.logs\n if self.request_id is not None:\n result['RequestId'] = self.request_id\n if self.total_count is not None:\n result['TotalCount'] = self.total_count\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('Logs') is not None:\n self.logs = m.get('Logs')\n if m.get('RequestId') is not None:\n self.request_id = m.get('RequestId')\n if m.get('TotalCount') is not None:\n self.total_count = m.get('TotalCount')\n return self" }, { "identifier": "ListTrainingJobsRequest", "path": "pai/libs/alibabacloud_paistudio20220112/models.py", "snippet": "class ListTrainingJobsRequest(TeaModel):\n def __init__(\n self,\n algorithm_name: str = None,\n algorithm_provider: str = None,\n end_time: str = None,\n is_temp_algo: bool = None,\n labels: Dict[str, Any] = None,\n order: str = None,\n page_number: int = None,\n page_size: int = None,\n sort_by: str = None,\n start_time: str = None,\n status: str = None,\n training_job_id: str = None,\n training_job_name: str = None,\n workspace_id: str = None,\n ):\n self.algorithm_name = algorithm_name\n self.algorithm_provider = algorithm_provider\n self.end_time = end_time\n self.is_temp_algo = is_temp_algo\n self.labels = labels\n self.order = order\n self.page_number = page_number\n self.page_size = page_size\n self.sort_by = sort_by\n self.start_time = start_time\n self.status = status\n self.training_job_id = training_job_id\n self.training_job_name = training_job_name\n self.workspace_id = workspace_id\n\n def validate(self):\n pass\n\n def to_map(self):\n _map = super().to_map()\n if _map is not None:\n return _map\n\n result = dict()\n if self.algorithm_name is not None:\n result['AlgorithmName'] = self.algorithm_name\n if self.algorithm_provider is not None:\n result['AlgorithmProvider'] = self.algorithm_provider\n if self.end_time is not None:\n result['EndTime'] = self.end_time\n if self.is_temp_algo is not None:\n result['IsTempAlgo'] = self.is_temp_algo\n if self.labels is not None:\n result['Labels'] = self.labels\n if self.order is not None:\n result['Order'] = self.order\n if self.page_number is not None:\n result['PageNumber'] = self.page_number\n if self.page_size is not None:\n result['PageSize'] = self.page_size\n if self.sort_by is not None:\n result['SortBy'] = self.sort_by\n if self.start_time is not None:\n result['StartTime'] = self.start_time\n if self.status is not None:\n result['Status'] = self.status\n if self.training_job_id is not None:\n result['TrainingJobId'] = self.training_job_id\n if self.training_job_name is not None:\n result['TrainingJobName'] = self.training_job_name\n if self.workspace_id is not None:\n result['WorkspaceId'] = self.workspace_id\n return result\n\n def from_map(self, m: dict = None):\n m = m or dict()\n if m.get('AlgorithmName') is not None:\n self.algorithm_name = m.get('AlgorithmName')\n if m.get('AlgorithmProvider') is not None:\n self.algorithm_provider = m.get('AlgorithmProvider')\n if m.get('EndTime') is not None:\n self.end_time = m.get('EndTime')\n if m.get('IsTempAlgo') is not 
None:\n self.is_temp_algo = m.get('IsTempAlgo')\n if m.get('Labels') is not None:\n self.labels = m.get('Labels')\n if m.get('Order') is not None:\n self.order = m.get('Order')\n if m.get('PageNumber') is not None:\n self.page_number = m.get('PageNumber')\n if m.get('PageSize') is not None:\n self.page_size = m.get('PageSize')\n if m.get('SortBy') is not None:\n self.sort_by = m.get('SortBy')\n if m.get('StartTime') is not None:\n self.start_time = m.get('StartTime')\n if m.get('Status') is not None:\n self.status = m.get('Status')\n if m.get('TrainingJobId') is not None:\n self.training_job_id = m.get('TrainingJobId')\n if m.get('TrainingJobName') is not None:\n self.training_job_name = m.get('TrainingJobName')\n if m.get('WorkspaceId') is not None:\n self.workspace_id = m.get('WorkspaceId')\n return self" } ]
from typing import Any, Dict, List, Optional
from ..api.base import PaginatedResult, ServiceName, WorkspaceScopedResourceAPI
from ..libs.alibabacloud_paistudio20220112.models import (
    AlgorithmSpec,
    CreateTrainingJobRequest,
    CreateTrainingJobRequestComputeResource,
    CreateTrainingJobRequestHyperParameters,
    CreateTrainingJobRequestInputChannels,
    CreateTrainingJobRequestLabels,
    CreateTrainingJobRequestOutputChannels,
    CreateTrainingJobRequestScheduler,
    CreateTrainingJobRequestUserVpc,
    CreateTrainingJobResponseBody,
    GetTrainingJobRequest,
    GetTrainingJobResponseBody,
    ListTrainingJobLogsRequest,
    ListTrainingJobLogsResponseBody,
    ListTrainingJobsRequest,
)
11893
# Copyright 2023 Alibaba, Inc. or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class TrainingJobAPI(WorkspaceScopedResourceAPI):
    BACKEND_SERVICE_NAME = ServiceName.PAI_STUDIO

    _list_method = "list_training_jobs_with_options"
    _create_method = "create_training_job_with_options"
    _get_method = "get_training_job_with_options"
    _list_logs_method = "list_training_job_logs_with_options"
    # _list_method = "list_training_jobs_with_options"

    def list(
        self,
        page_size: int = 20,
        page_number: int = 1,
        order: str = None,
        sort_by: str = None,
        status: str = None,
        training_job_name: str = None,
    ) -> PaginatedResult:
        request = ListTrainingJobsRequest(
            page_size=page_size,
            page_number=page_number,
            status=status,
            training_job_name=training_job_name,
            order=order,
            sort_by=sort_by,
        )

        res = self._do_request(
            method_=self._list_method,
            tmp_req=request,
        )
        return self.make_paginated_result(res)

    def get_api_object_by_resource_id(self, resource_id) -> Dict[str, Any]:
        res: GetTrainingJobResponseBody = self._do_request(
            method_=self._get_method,
            training_job_id=resource_id,
# Copyright 2023 Alibaba, Inc. or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class TrainingJobAPI(WorkspaceScopedResourceAPI):
    BACKEND_SERVICE_NAME = ServiceName.PAI_STUDIO

    _list_method = "list_training_jobs_with_options"
    _create_method = "create_training_job_with_options"
    _get_method = "get_training_job_with_options"
    _list_logs_method = "list_training_job_logs_with_options"
    # _list_method = "list_training_jobs_with_options"

    def list(
        self,
        page_size: int = 20,
        page_number: int = 1,
        order: str = None,
        sort_by: str = None,
        status: str = None,
        training_job_name: str = None,
    ) -> PaginatedResult:
        request = ListTrainingJobsRequest(
            page_size=page_size,
            page_number=page_number,
            status=status,
            training_job_name=training_job_name,
            order=order,
            sort_by=sort_by,
        )

        res = self._do_request(
            method_=self._list_method,
            tmp_req=request,
        )
        return self.make_paginated_result(res)

    def get_api_object_by_resource_id(self, resource_id) -> Dict[str, Any]:
        res: GetTrainingJobResponseBody = self._do_request(
            method_=self._get_method,
            training_job_id=resource_id,
request=GetTrainingJobRequest(),
13
2023-12-01 01:40:12+00:00
16k
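The context entries for this sample are auto-generated TeaModel data classes that all expose the same to_map()/from_map() serialization pair. A minimal sketch of that round trip, assuming the repo's pai package is importable as in the import_statement field above; the job name, algorithm name, ECS spec, and OSS URIs are placeholder values:

from pai.libs.alibabacloud_paistudio20220112.models import (
    CreateTrainingJobRequest,
    CreateTrainingJobRequestComputeResource,
    CreateTrainingJobRequestHyperParameters,
    CreateTrainingJobRequestInputChannels,
    CreateTrainingJobRequestOutputChannels,
    CreateTrainingJobRequestScheduler,
)

# Every constructor argument defaults to None, so only the parts of the
# request that are actually needed have to be filled in (placeholder values).
request = CreateTrainingJobRequest(
    training_job_name="example-job",
    algorithm_name="example-algorithm",
    algorithm_provider="pai",
    compute_resource=CreateTrainingJobRequestComputeResource(ecs_count=1, ecs_spec="ecs.c6.large"),
    hyper_parameters=[CreateTrainingJobRequestHyperParameters(name="epochs", value="10")],
    input_channels=[CreateTrainingJobRequestInputChannels(name="train", input_uri="oss://bucket/train/")],
    output_channels=[CreateTrainingJobRequestOutputChannels(name="model", output_uri="oss://bucket/model/")],
    scheduler=CreateTrainingJobRequestScheduler(max_running_time_in_seconds=3600),
)

# to_map() recursively serializes nested models into a plain dict keyed by the
# API field names; from_map() rebuilds typed models from such a dict.
payload = request.to_map()
assert payload["TrainingJobName"] == "example-job"
assert payload["Scheduler"] == {"MaxRunningTimeInSeconds": 3600}

roundtrip = CreateTrainingJobRequest().from_map(payload)
assert roundtrip.compute_resource.ecs_spec == "ecs.c6.large"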
JunMa11/UHNSeg-Quiz
nnunetv2/inference/predict_from_raw_data.py
[ { "identifier": "default_num_processes", "path": "nnunetv2/configuration.py", "snippet": "ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low" }, { "identifier": "PreprocessAdapterFromNpy", "path": "nnunetv2/inference/data_iterators.py", "snippet": "class PreprocessAdapterFromNpy(DataLoader):\n def __init__(self, list_of_images: List[np.ndarray],\n list_of_segs_from_prev_stage: Union[List[np.ndarray], None],\n list_of_image_properties: List[dict],\n truncated_ofnames: Union[List[str], None],\n plans_manager: PlansManager, dataset_json: dict, configuration_manager: ConfigurationManager,\n num_threads_in_multithreaded: int = 1, verbose: bool = False):\n preprocessor = configuration_manager.preprocessor_class(verbose=verbose)\n self.preprocessor, self.plans_manager, self.configuration_manager, self.dataset_json, self.truncated_ofnames = \\\n preprocessor, plans_manager, configuration_manager, dataset_json, truncated_ofnames\n\n self.label_manager = plans_manager.get_label_manager(dataset_json)\n\n if list_of_segs_from_prev_stage is None:\n list_of_segs_from_prev_stage = [None] * len(list_of_images)\n if truncated_ofnames is None:\n truncated_ofnames = [None] * len(list_of_images)\n\n super().__init__(\n list(zip(list_of_images, list_of_segs_from_prev_stage, list_of_image_properties, truncated_ofnames)),\n 1, num_threads_in_multithreaded,\n seed_for_shuffle=1, return_incomplete=True,\n shuffle=False, infinite=False, sampling_probabilities=None)\n\n self.indices = list(range(len(list_of_images)))\n\n def generate_train_batch(self):\n idx = self.get_indices()[0]\n image = self._data[idx][0]\n seg_prev_stage = self._data[idx][1]\n props = self._data[idx][2]\n ofname = self._data[idx][3]\n # if we have a segmentation from the previous stage we have to process it together with the images so that we\n # can crop it appropriately (if needed). 
Otherwise it would just be resized to the shape of the data after\n # preprocessing and then there might be misalignments\n data, seg = self.preprocessor.run_case_npy(image, seg_prev_stage, props,\n self.plans_manager,\n self.configuration_manager,\n self.dataset_json)\n if seg_prev_stage is not None:\n seg_onehot = convert_labelmap_to_one_hot(seg[0], self.label_manager.foreground_labels, data.dtype)\n data = np.vstack((data, seg_onehot))\n\n data = torch.from_numpy(data)\n\n return {'data': data, 'data_properties': props, 'ofile': ofname}" }, { "identifier": "preprocessing_iterator_fromfiles", "path": "nnunetv2/inference/data_iterators.py", "snippet": "def preprocessing_iterator_fromfiles(list_of_lists: List[List[str]],\n list_of_segs_from_prev_stage_files: Union[None, List[str]],\n output_filenames_truncated: Union[None, List[str]],\n plans_manager: PlansManager,\n dataset_json: dict,\n configuration_manager: ConfigurationManager,\n num_processes: int,\n pin_memory: bool = False,\n verbose: bool = False):\n context = multiprocessing.get_context('spawn')\n manager = Manager()\n num_processes = min(len(list_of_lists), num_processes)\n assert num_processes >= 1\n processes = []\n done_events = []\n target_queues = []\n abort_event = manager.Event()\n for i in range(num_processes):\n event = manager.Event()\n queue = Manager().Queue(maxsize=1)\n pr = context.Process(target=preprocess_fromfiles_save_to_queue,\n args=(\n list_of_lists[i::num_processes],\n list_of_segs_from_prev_stage_files[\n i::num_processes] if list_of_segs_from_prev_stage_files is not None else None,\n output_filenames_truncated[\n i::num_processes] if output_filenames_truncated is not None else None,\n plans_manager,\n dataset_json,\n configuration_manager,\n queue,\n event,\n abort_event,\n verbose\n ), daemon=True)\n pr.start()\n target_queues.append(queue)\n done_events.append(event)\n processes.append(pr)\n\n worker_ctr = 0\n while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()):\n if not target_queues[worker_ctr].empty():\n item = target_queues[worker_ctr].get()\n worker_ctr = (worker_ctr + 1) % num_processes\n else:\n all_ok = all(\n [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set()\n if not all_ok:\n raise RuntimeError('Background workers died. Look for the error message further up! If there is '\n 'none then your RAM was full and the worker was killed by the OS. 
Use fewer '\n 'workers or get more RAM in that case!')\n sleep(0.01)\n continue\n if pin_memory:\n [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]\n yield item\n [p.join() for p in processes]" }, { "identifier": "preprocessing_iterator_fromnpy", "path": "nnunetv2/inference/data_iterators.py", "snippet": "def preprocessing_iterator_fromnpy(list_of_images: List[np.ndarray],\n list_of_segs_from_prev_stage: Union[List[np.ndarray], None],\n list_of_image_properties: List[dict],\n truncated_ofnames: Union[List[str], None],\n plans_manager: PlansManager,\n dataset_json: dict,\n configuration_manager: ConfigurationManager,\n num_processes: int,\n pin_memory: bool = False,\n verbose: bool = False):\n context = multiprocessing.get_context('spawn')\n manager = Manager()\n num_processes = min(len(list_of_images), num_processes)\n assert num_processes >= 1\n target_queues = []\n processes = []\n done_events = []\n abort_event = manager.Event()\n for i in range(num_processes):\n event = manager.Event()\n queue = manager.Queue(maxsize=1)\n pr = context.Process(target=preprocess_fromnpy_save_to_queue,\n args=(\n list_of_images[i::num_processes],\n list_of_segs_from_prev_stage[\n i::num_processes] if list_of_segs_from_prev_stage is not None else None,\n list_of_image_properties[i::num_processes],\n truncated_ofnames[i::num_processes] if truncated_ofnames is not None else None,\n plans_manager,\n dataset_json,\n configuration_manager,\n queue,\n event,\n abort_event,\n verbose\n ), daemon=True)\n pr.start()\n done_events.append(event)\n processes.append(pr)\n target_queues.append(queue)\n\n worker_ctr = 0\n while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()):\n if not target_queues[worker_ctr].empty():\n item = target_queues[worker_ctr].get()\n worker_ctr = (worker_ctr + 1) % num_processes\n else:\n all_ok = all(\n [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set()\n if not all_ok:\n raise RuntimeError('Background workers died. Look for the error message further up! If there is '\n 'none then your RAM was full and the worker was killed by the OS. 
Use fewer '\n 'workers or get more RAM in that case!')\n sleep(0.01)\n continue\n if pin_memory:\n [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]\n yield item\n [p.join() for p in processes]" }, { "identifier": "export_prediction_from_logits", "path": "nnunetv2/inference/export_prediction.py", "snippet": "def export_prediction_from_logits(predicted_array_or_file: Union[np.ndarray, torch.Tensor], properties_dict: dict,\n configuration_manager: ConfigurationManager,\n plans_manager: PlansManager,\n dataset_json_dict_or_file: Union[dict, str], output_file_truncated: str,\n save_probabilities: bool = False):\n # if isinstance(predicted_array_or_file, str):\n # tmp = deepcopy(predicted_array_or_file)\n # if predicted_array_or_file.endswith('.npy'):\n # predicted_array_or_file = np.load(predicted_array_or_file)\n # elif predicted_array_or_file.endswith('.npz'):\n # predicted_array_or_file = np.load(predicted_array_or_file)['softmax']\n # os.remove(tmp)\n\n if isinstance(dataset_json_dict_or_file, str):\n dataset_json_dict_or_file = load_json(dataset_json_dict_or_file)\n\n label_manager = plans_manager.get_label_manager(dataset_json_dict_or_file)\n ret = convert_predicted_logits_to_segmentation_with_correct_shape(\n predicted_array_or_file, plans_manager, configuration_manager, label_manager, properties_dict,\n return_probabilities=save_probabilities\n )\n del predicted_array_or_file\n\n # save\n if save_probabilities:\n segmentation_final, probabilities_final = ret\n np.savez_compressed(output_file_truncated + '.npz', probabilities=probabilities_final)\n save_pickle(properties_dict, output_file_truncated + '.pkl')\n del probabilities_final, ret\n else:\n segmentation_final = ret\n del ret\n\n rw = plans_manager.image_reader_writer_class()\n rw.write_seg(segmentation_final, output_file_truncated + dataset_json_dict_or_file['file_ending'],\n properties_dict)" }, { "identifier": "convert_predicted_logits_to_segmentation_with_correct_shape", "path": "nnunetv2/inference/export_prediction.py", "snippet": "def convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits: Union[torch.Tensor, np.ndarray],\n plans_manager: PlansManager,\n configuration_manager: ConfigurationManager,\n label_manager: LabelManager,\n properties_dict: dict,\n return_probabilities: bool = False,\n num_threads_torch: int = default_num_processes):\n old_threads = torch.get_num_threads()\n torch.set_num_threads(num_threads_torch)\n\n # resample to original shape\n current_spacing = configuration_manager.spacing if \\\n len(configuration_manager.spacing) == \\\n len(properties_dict['shape_after_cropping_and_before_resampling']) else \\\n [properties_dict['spacing'][0], *configuration_manager.spacing]\n predicted_logits = configuration_manager.resampling_fn_probabilities(predicted_logits,\n properties_dict['shape_after_cropping_and_before_resampling'],\n current_spacing,\n properties_dict['spacing'])\n # return value of resampling_fn_probabilities can be ndarray or Tensor but that does not matter because\n # apply_inference_nonlin will convert to torch\n predicted_probabilities = label_manager.apply_inference_nonlin(predicted_logits)\n del predicted_logits\n segmentation = label_manager.convert_probabilities_to_segmentation(predicted_probabilities)\n\n # segmentation may be torch.Tensor but we continue with numpy\n if isinstance(segmentation, torch.Tensor):\n segmentation = segmentation.cpu().numpy()\n\n # put segmentation in bbox (revert cropping)\n segmentation_reverted_cropping = 
np.zeros(properties_dict['shape_before_cropping'],\n dtype=np.uint8 if len(label_manager.foreground_labels) < 255 else np.uint16)\n slicer = bounding_box_to_slice(properties_dict['bbox_used_for_cropping'])\n segmentation_reverted_cropping[slicer] = segmentation\n del segmentation\n\n # revert transpose\n segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(plans_manager.transpose_backward)\n if return_probabilities:\n # revert cropping\n predicted_probabilities = label_manager.revert_cropping_on_probabilities(predicted_probabilities,\n properties_dict[\n 'bbox_used_for_cropping'],\n properties_dict[\n 'shape_before_cropping'])\n predicted_probabilities = predicted_probabilities.cpu().numpy()\n # revert transpose\n predicted_probabilities = predicted_probabilities.transpose([0] + [i + 1 for i in\n plans_manager.transpose_backward])\n torch.set_num_threads(old_threads)\n return segmentation_reverted_cropping, predicted_probabilities\n else:\n torch.set_num_threads(old_threads)\n return segmentation_reverted_cropping" }, { "identifier": "compute_gaussian", "path": "nnunetv2/inference/sliding_window_prediction.py", "snippet": "@lru_cache(maxsize=2)\ndef compute_gaussian(tile_size: Union[Tuple[int, ...], List[int]], sigma_scale: float = 1. / 8,\n value_scaling_factor: float = 1, dtype=torch.float16, device=torch.device('cuda', 0)) \\\n -> torch.Tensor:\n tmp = np.zeros(tile_size)\n center_coords = [i // 2 for i in tile_size]\n sigmas = [i * sigma_scale for i in tile_size]\n tmp[tuple(center_coords)] = 1\n gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map)\n\n gaussian_importance_map = gaussian_importance_map / torch.max(gaussian_importance_map) * value_scaling_factor\n gaussian_importance_map = gaussian_importance_map.type(dtype).to(device)\n\n # gaussian_importance_map cannot be 0, otherwise we may end up with nans!\n gaussian_importance_map[gaussian_importance_map == 0] = torch.min(\n gaussian_importance_map[gaussian_importance_map != 0])\n\n return gaussian_importance_map" }, { "identifier": "compute_steps_for_sliding_window", "path": "nnunetv2/inference/sliding_window_prediction.py", "snippet": "def compute_steps_for_sliding_window(image_size: Tuple[int, ...], tile_size: Tuple[int, ...], tile_step_size: float) -> \\\n List[List[int]]:\n assert [i >= j for i, j in zip(image_size, tile_size)], \"image size must be as large or larger than patch_size\"\n assert 0 < tile_step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'\n\n # our step width is patch_size*step_size at most, but can be narrower. 
For example if we have image size of\n # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46\n target_step_sizes_in_voxels = [i * tile_step_size for i in tile_size]\n\n num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, tile_size)]\n\n steps = []\n for dim in range(len(tile_size)):\n # the highest step value for this dimension is\n max_step_value = image_size[dim] - tile_size[dim]\n if num_steps[dim] > 1:\n actual_step_size = max_step_value / (num_steps[dim] - 1)\n else:\n actual_step_size = 99999999999 # does not matter because there is only one step at 0\n\n steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])]\n\n steps.append(steps_here)\n\n return steps" }, { "identifier": "get_output_folder", "path": "nnunetv2/utilities/file_path_utilities.py", "snippet": "def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer',\n plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres',\n fold: Union[str, int] = None) -> str:\n tmp = join(nnUNet_results, maybe_convert_to_dataset_name(dataset_name_or_id),\n convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration))\n if fold is not None:\n tmp = join(tmp, f'fold_{fold}')\n return tmp" }, { "identifier": "check_workers_alive_and_busy", "path": "nnunetv2/utilities/file_path_utilities.py", "snippet": "def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0):\n \"\"\"\n\n returns True if the number of results that are not ready is greater than the number of available workers + allowed_num_queued\n \"\"\"\n alive = [i.is_alive() for i in worker_list]\n if not all(alive):\n raise RuntimeError('Some background workers are no longer alive')\n\n not_ready = [not i.ready() for i in results_list]\n if sum(not_ready) >= (len(export_pool._pool) + allowed_num_queued):\n return True\n return False" }, { "identifier": "recursive_find_python_class", "path": "nnunetv2/utilities/find_class_by_name.py", "snippet": "def recursive_find_python_class(folder: str, class_name: str, current_module: str):\n tr = None\n for importer, modname, ispkg in pkgutil.iter_modules([folder]):\n # print(modname, ispkg)\n if not ispkg:\n m = importlib.import_module(current_module + \".\" + modname)\n if hasattr(m, class_name):\n tr = getattr(m, class_name)\n break\n\n if tr is None:\n for importer, modname, ispkg in pkgutil.iter_modules([folder]):\n if ispkg:\n next_current_module = current_module + \".\" + modname\n tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module)\n if tr is not None:\n break\n return tr" }, { "identifier": "empty_cache", "path": "nnunetv2/utilities/helpers.py", "snippet": "def empty_cache(device: torch.device):\n if device.type == 'cuda':\n torch.cuda.empty_cache()\n elif device.type == 'mps':\n from torch import mps\n mps.empty_cache()\n else:\n pass" }, { "identifier": "dummy_context", "path": "nnunetv2/utilities/helpers.py", "snippet": "class dummy_context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass" }, { "identifier": "recursive_fix_for_json_export", "path": "nnunetv2/utilities/json_export.py", "snippet": "def recursive_fix_for_json_export(my_dict: dict):\n # json is stupid. 'cannot serialize object of type bool_/int64/float64'. 
Come on bro.\n keys = list(my_dict.keys()) # cannot iterate over keys() if we change keys....\n for k in keys:\n if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):\n tmp = my_dict[k]\n del my_dict[k]\n my_dict[int(k)] = tmp\n del tmp\n k = int(k)\n\n if isinstance(my_dict[k], dict):\n recursive_fix_for_json_export(my_dict[k])\n elif isinstance(my_dict[k], np.ndarray):\n assert my_dict[k].ndim == 1, 'only 1d arrays are supported'\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)\n elif isinstance(my_dict[k], (np.bool_,)):\n my_dict[k] = bool(my_dict[k])\n elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)):\n my_dict[k] = int(my_dict[k])\n elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)):\n my_dict[k] = float(my_dict[k])\n elif isinstance(my_dict[k], list):\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))\n elif isinstance(my_dict[k], tuple):\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)\n elif isinstance(my_dict[k], torch.device):\n my_dict[k] = str(my_dict[k])\n else:\n pass # pray it can be serialized" }, { "identifier": "determine_num_input_channels", "path": "nnunetv2/utilities/label_handling/label_handling.py", "snippet": "def determine_num_input_channels(plans_manager: PlansManager,\n configuration_or_config_manager: Union[str, ConfigurationManager],\n dataset_json: dict) -> int:\n if isinstance(configuration_or_config_manager, str):\n config_manager = plans_manager.get_configuration(configuration_or_config_manager)\n else:\n config_manager = configuration_or_config_manager\n\n label_manager = plans_manager.get_label_manager(dataset_json)\n num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])\n\n # cascade has different number of input channels\n if config_manager.previous_stage_name is not None:\n num_label_inputs = len(label_manager.foreground_labels)\n num_input_channels = num_modalities + num_label_inputs\n else:\n num_input_channels = num_modalities\n return num_input_channels" }, { "identifier": "PlansManager", "path": "nnunetv2/utilities/plans_handling/plans_handler.py", "snippet": "class PlansManager(object):\n def __init__(self, plans_file_or_dict: Union[str, dict]):\n \"\"\"\n Why do we need this?\n 1) resolve inheritance in configurations\n 2) expose otherwise annoying stuff like getting the label manager or IO class from a string\n 3) clearly expose the things that are in the plans instead of hiding them in a dict\n 4) cache shit\n\n This class does not prevent you from going wild. You can still use the plans directly if you prefer\n (PlansHandler.plans['key'])\n \"\"\"\n self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict)\n\n def __repr__(self):\n return self.plans.__repr__()\n\n def _internal_resolve_configuration_inheritance(self, configuration_name: str,\n visited: Tuple[str, ...] = None) -> dict:\n if configuration_name not in self.plans['configurations'].keys():\n raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. 
Valid '\n f'configuration names are {list(self.plans[\"configurations\"].keys())}.')\n configuration = deepcopy(self.plans['configurations'][configuration_name])\n if 'inherits_from' in configuration:\n parent_config_name = configuration['inherits_from']\n\n if visited is None:\n visited = (configuration_name,)\n else:\n if parent_config_name in visited:\n raise RuntimeError(f\"Circular dependency detected. The following configurations were visited \"\n f\"while solving inheritance (in that order!): {visited}. \"\n f\"Current configuration: {configuration_name}. Its parent configuration \"\n f\"is {parent_config_name}.\")\n visited = (*visited, configuration_name)\n\n base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited)\n base_config.update(configuration)\n configuration = base_config\n return configuration\n\n @lru_cache(maxsize=10)\n def get_configuration(self, configuration_name: str):\n if configuration_name not in self.plans['configurations'].keys():\n raise RuntimeError(f\"Requested configuration {configuration_name} not found in plans. \"\n f\"Available configurations: {list(self.plans['configurations'].keys())}\")\n\n configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name)\n return ConfigurationManager(configuration_dict)\n\n @property\n def dataset_name(self) -> str:\n return self.plans['dataset_name']\n\n @property\n def plans_name(self) -> str:\n return self.plans['plans_name']\n\n @property\n def original_median_spacing_after_transp(self) -> List[float]:\n return self.plans['original_median_spacing_after_transp']\n\n @property\n def original_median_shape_after_transp(self) -> List[float]:\n return self.plans['original_median_shape_after_transp']\n\n @property\n @lru_cache(maxsize=1)\n def image_reader_writer_class(self) -> Type[BaseReaderWriter]:\n return recursive_find_reader_writer_by_name(self.plans['image_reader_writer'])\n\n @property\n def transpose_forward(self) -> List[int]:\n return self.plans['transpose_forward']\n\n @property\n def transpose_backward(self) -> List[int]:\n return self.plans['transpose_backward']\n\n @property\n def available_configurations(self) -> List[str]:\n return list(self.plans['configurations'].keys())\n\n @property\n @lru_cache(maxsize=1)\n def experiment_planner_class(self) -> Type[ExperimentPlanner]:\n planner_name = self.experiment_planner_name\n experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], \"experiment_planning\"),\n planner_name,\n current_module=\"nnunetv2.experiment_planning\")\n return experiment_planner\n\n @property\n def experiment_planner_name(self) -> str:\n return self.plans['experiment_planner_used']\n\n @property\n @lru_cache(maxsize=1)\n def label_manager_class(self) -> Type[LabelManager]:\n return get_labelmanager_class_from_plans(self.plans)\n\n def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager:\n return self.label_manager_class(label_dict=dataset_json['labels'],\n regions_class_order=dataset_json.get('regions_class_order'),\n **kwargs)\n\n @property\n def foreground_intensity_properties_per_channel(self) -> dict:\n if 'foreground_intensity_properties_per_channel' not in self.plans.keys():\n if 'foreground_intensity_properties_by_modality' in self.plans.keys():\n return self.plans['foreground_intensity_properties_by_modality']\n return self.plans['foreground_intensity_properties_per_channel']" }, { "identifier": "ConfigurationManager", "path": "nnunetv2/utilities/plans_handling/plans_handler.py", 
"snippet": "class ConfigurationManager(object):\n def __init__(self, configuration_dict: dict):\n self.configuration = configuration_dict\n\n def __repr__(self):\n return self.configuration.__repr__()\n\n @property\n def data_identifier(self) -> str:\n return self.configuration['data_identifier']\n\n @property\n def preprocessor_name(self) -> str:\n return self.configuration['preprocessor_name']\n\n @property\n @lru_cache(maxsize=1)\n def preprocessor_class(self) -> Type[DefaultPreprocessor]:\n preprocessor_class = recursive_find_python_class(join(nnunetv2.__path__[0], \"preprocessing\"),\n self.preprocessor_name,\n current_module=\"nnunetv2.preprocessing\")\n return preprocessor_class\n\n @property\n def batch_size(self) -> int:\n return self.configuration['batch_size']\n\n @property\n def patch_size(self) -> List[int]:\n return self.configuration['patch_size']\n\n @property\n def median_image_size_in_voxels(self) -> List[int]:\n return self.configuration['median_image_size_in_voxels']\n\n @property\n def spacing(self) -> List[float]:\n return self.configuration['spacing']\n\n @property\n def normalization_schemes(self) -> List[str]:\n return self.configuration['normalization_schemes']\n\n @property\n def use_mask_for_norm(self) -> List[bool]:\n return self.configuration['use_mask_for_norm']\n\n @property\n def UNet_class_name(self) -> str:\n return self.configuration['UNet_class_name']\n\n @property\n @lru_cache(maxsize=1)\n def UNet_class(self) -> Type[nn.Module]:\n unet_class = recursive_find_python_class(join(dynamic_network_architectures.__path__[0], \"architectures\"),\n self.UNet_class_name,\n current_module=\"dynamic_network_architectures.architectures\")\n if unet_class is None:\n raise RuntimeError('The network architecture specified by the plans file '\n 'is non-standard (maybe your own?). 
Fix this by not using '\n 'ConfigurationManager.UNet_class to instantiate '\n 'it (probably just overwrite build_network_architecture of your trainer.')\n return unet_class\n\n @property\n def UNet_base_num_features(self) -> int:\n return self.configuration['UNet_base_num_features']\n\n @property\n def n_conv_per_stage_encoder(self) -> List[int]:\n return self.configuration['n_conv_per_stage_encoder']\n\n @property\n def n_conv_per_stage_decoder(self) -> List[int]:\n return self.configuration['n_conv_per_stage_decoder']\n\n @property\n def num_pool_per_axis(self) -> List[int]:\n return self.configuration['num_pool_per_axis']\n\n @property\n def pool_op_kernel_sizes(self) -> List[List[int]]:\n return self.configuration['pool_op_kernel_sizes']\n\n @property\n def conv_kernel_sizes(self) -> List[List[int]]:\n return self.configuration['conv_kernel_sizes']\n\n @property\n def unet_max_num_features(self) -> int:\n return self.configuration['unet_max_num_features']\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_data(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_data'])\n fn = partial(fn, **self.configuration['resampling_fn_data_kwargs'])\n return fn\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_probabilities(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_probabilities'])\n fn = partial(fn, **self.configuration['resampling_fn_probabilities_kwargs'])\n return fn\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_seg(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_seg'])\n fn = partial(fn, **self.configuration['resampling_fn_seg_kwargs'])\n return fn\n\n @property\n def batch_dice(self) -> bool:\n return self.configuration['batch_dice']\n\n @property\n def next_stage_names(self) -> Union[List[str], None]:\n ret = self.configuration.get('next_stage')\n if ret is not None:\n if isinstance(ret, str):\n ret = [ret]\n return ret\n\n @property\n def previous_stage_name(self) -> Union[str, None]:\n return self.configuration.get('previous_stage')" }, { "identifier": "create_lists_from_splitted_dataset_folder", "path": "nnunetv2/utilities/utils.py", "snippet": "def create_lists_from_splitted_dataset_folder(folder: str, file_ending: str, identifiers: List[str] = None) -> List[\n List[str]]:\n \"\"\"\n does not rely on dataset.json\n \"\"\"\n if identifiers is None:\n identifiers = get_identifiers_from_splitted_dataset_folder(folder, file_ending)\n files = subfiles(folder, suffix=file_ending, join=False, sort=True)\n list_of_lists = []\n for f in identifiers:\n p = re.compile(re.escape(f) + r\"_\\d\\d\\d\\d\" + re.escape(file_ending))\n list_of_lists.append([join(folder, i) for i in files if p.fullmatch(i)])\n return list_of_lists" } ]
import inspect
import multiprocessing
import os
import traceback
import numpy as np
import torch
import nnunetv2
import argparse
import multiprocessing
import argparse
import multiprocessing
from copy import deepcopy
from time import sleep
from typing import Tuple, Union, List, Optional
from acvl_utils.cropping_and_padding.padding import pad_nd_image
from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \
    save_json
from torch import nn
from torch._dynamo import OptimizedModule
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm
from nnunetv2.configuration import default_num_processes
from nnunetv2.inference.data_iterators import PreprocessAdapterFromNpy, preprocessing_iterator_fromfiles, \
    preprocessing_iterator_fromnpy
from nnunetv2.inference.export_prediction import export_prediction_from_logits, \
    convert_predicted_logits_to_segmentation_with_correct_shape
from nnunetv2.inference.sliding_window_prediction import compute_gaussian, \
    compute_steps_for_sliding_window
from nnunetv2.utilities.file_path_utilities import get_output_folder, check_workers_alive_and_busy
from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
from nnunetv2.utilities.helpers import empty_cache, dummy_context
from nnunetv2.utilities.json_export import recursive_fix_for_json_export
from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder
from nnunetv2.paths import nnUNet_results, nnUNet_raw
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
11,147
segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images)) pp = preprocessing_iterator_fromnpy( list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing ) return pp def predict_from_list_of_npy_arrays(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): iterator = self.get_data_iterator_from_raw_npy_data(image_or_list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, num_processes) return self.predict_from_data_iterator(iterator, save_probabilities, num_processes_segmentation_export) def predict_from_data_iterator(self, data_iterator, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): """ each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys! If 'ofile' is None, the result will be returned instead of written to a file """ with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool: worker_list = [i for i in export_pool._pool] r = [] for preprocessed in data_iterator: data = preprocessed['data'] if isinstance(data, str): delfile = data data = torch.from_numpy(np.load(data)) os.remove(delfile) ofile = preprocessed['ofile'] if ofile is not None: print(f'\nPredicting {os.path.basename(ofile)}:') else: print(f'\nPredicting image of shape {data.shape}:') print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}') properties = preprocessed['data_properties'] # let's not get into a runaway situation where the GPU predicts so fast that the disk has to b swamped with # npy files proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) while not proceed: # print('sleeping') sleep(0.1) proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) prediction = self.predict_logits_from_preprocessed_data(data).cpu() if ofile is not None: # this needs to go into background processes # export_prediction_from_logits(prediction, properties, configuration_manager, plans_manager, # dataset_json, ofile, save_probabilities) print('sending off prediction to background worker for resampling and export') r.append( export_pool.starmap_async( export_prediction_from_logits, ((prediction, properties, self.configuration_manager, self.plans_manager, self.dataset_json, ofile, save_probabilities),) ) ) else: # convert_predicted_logits_to_segmentation_with_correct_shape(prediction, plans_manager, # configuration_manager, label_manager, # properties, # save_probabilities) print('sending off prediction to background worker for resampling') r.append( export_pool.starmap_async( 
convert_predicted_logits_to_segmentation_with_correct_shape, ( (prediction, self.plans_manager, self.configuration_manager, self.label_manager, properties, save_probabilities),) ) ) if ofile is not None: print(f'done with {os.path.basename(ofile)}') else: print(f'\nDone with image of shape {data.shape}:') ret = [i.get()[0] for i in r] if isinstance(data_iterator, MultiThreadedAugmenter): data_iterator._finish() # clear lru cache compute_gaussian.cache_clear() # clear device cache
class nnUNetPredictor(object): def __init__(self, tile_step_size: float = 0.5, use_gaussian: bool = True, use_mirroring: bool = True, perform_everything_on_gpu: bool = True, device: torch.device = torch.device('cuda'), verbose: bool = False, verbose_preprocessing: bool = False, allow_tqdm: bool = True): self.verbose = verbose self.verbose_preprocessing = verbose_preprocessing self.allow_tqdm = allow_tqdm self.plans_manager, self.configuration_manager, self.list_of_parameters, self.network, self.dataset_json, \ self.trainer_name, self.allowed_mirroring_axes, self.label_manager = None, None, None, None, None, None, None, None self.tile_step_size = tile_step_size self.use_gaussian = use_gaussian self.use_mirroring = use_mirroring if device.type == 'cuda': # device = torch.device(type='cuda', index=0) # set the desired GPU with CUDA_VISIBLE_DEVICES! # why would I ever want to do that. Stupid dobby. This kills DDP inference... pass if device.type != 'cuda': print(f'perform_everything_on_gpu=True is only supported for cuda devices! Setting this to False') perform_everything_on_gpu = False self.device = device self.perform_everything_on_gpu = perform_everything_on_gpu def initialize_from_trained_model_folder(self, model_training_output_dir: str, use_folds: Union[Tuple[Union[int, str]], None], checkpoint_name: str = 'checkpoint_final.pth'): """ This is used when making predictions with a trained model """ if use_folds is None: use_folds = nnUNetPredictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) dataset_json = load_json(join(model_training_output_dir, 'dataset.json')) plans = load_json(join(model_training_output_dir, 'plans.json')) plans_manager = PlansManager(plans) if isinstance(use_folds, str): use_folds = [use_folds] parameters = [] for i, f in enumerate(use_folds): f = int(f) if f != 'all' else f checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name), map_location=torch.device('cpu')) if i == 0: trainer_name = checkpoint['trainer_name'] configuration_name = checkpoint['init_args']['configuration'] inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \ 'inference_allowed_mirroring_axes' in checkpoint.keys() else None parameters.append(checkpoint['network_weights']) configuration_manager = plans_manager.get_configuration(configuration_name) # restore network num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, 'nnunetv2.training.nnUNetTrainer') network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager, num_input_channels, enable_deep_supervision=False) self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) if ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) \ and not isinstance(self.network, OptimizedModule): print('compiling network') self.network = torch.compile(self.network) def manual_initialization(self, network: nn.Module, plans_manager: PlansManager, configuration_manager: ConfigurationManager, parameters: Optional[List[dict]], dataset_json: dict, 
trainer_name: str, inference_allowed_mirroring_axes: Optional[Tuple[int, ...]]): """ This is used by the nnUNetTrainer to initialize nnUNetPredictor for the final validation """ self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) allow_compile = True allow_compile = allow_compile and ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) allow_compile = allow_compile and not isinstance(self.network, OptimizedModule) if isinstance(self.network, DistributedDataParallel): allow_compile = allow_compile and isinstance(self.network.module, OptimizedModule) if allow_compile: print('compiling network') self.network = torch.compile(self.network) @staticmethod def auto_detect_available_folds(model_training_output_dir, checkpoint_name): print('use_folds is None, attempting to auto detect available folds') fold_folders = subdirs(model_training_output_dir, prefix='fold_', join=False) fold_folders = [i for i in fold_folders if i != 'fold_all'] fold_folders = [i for i in fold_folders if isfile(join(model_training_output_dir, i, checkpoint_name))] use_folds = [int(i.split('_')[-1]) for i in fold_folders] print(f'found the following folds: {use_folds}') return use_folds def _manage_input_and_output_lists(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[None, str, List[str]], folder_with_segs_from_prev_stage: str = None, overwrite: bool = True, part_id: int = 0, num_parts: int = 1, save_probabilities: bool = False): if isinstance(list_of_lists_or_source_folder, str): list_of_lists_or_source_folder = create_lists_from_splitted_dataset_folder(list_of_lists_or_source_folder, self.dataset_json['file_ending']) print(f'There are {len(list_of_lists_or_source_folder)} cases in the source folder') list_of_lists_or_source_folder = list_of_lists_or_source_folder[part_id::num_parts] caseids = [os.path.basename(i[0])[:-(len(self.dataset_json['file_ending']) + 5)] for i in list_of_lists_or_source_folder] print( f'I am process {part_id} out of {num_parts} (max process ID is {num_parts - 1}, we start counting with 0!)') print(f'There are {len(caseids)} cases that I would like to predict') if isinstance(output_folder_or_list_of_truncated_output_files, str): output_filename_truncated = [join(output_folder_or_list_of_truncated_output_files, i) for i in caseids] else: output_filename_truncated = output_folder_or_list_of_truncated_output_files seg_from_prev_stage_files = [join(folder_with_segs_from_prev_stage, i + self.dataset_json['file_ending']) if folder_with_segs_from_prev_stage is not None else None for i in caseids] # remove already predicted files form the lists if not overwrite and output_filename_truncated is not None: tmp = [isfile(i + self.dataset_json['file_ending']) for i in output_filename_truncated] if save_probabilities: tmp2 = [isfile(i + '.npz') for i in output_filename_truncated] tmp = [i and j for i, j in zip(tmp, tmp2)] not_existing_indices = [i for i, j in enumerate(tmp) if not j] output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices] list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices] seg_from_prev_stage_files = 
[seg_from_prev_stage_files[i] for i in not_existing_indices] print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. ' f'That\'s {len(not_existing_indices)} cases.') return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files def predict_from_files(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]], save_probabilities: bool = False, overwrite: bool = True, num_processes_preprocessing: int = default_num_processes, num_processes_segmentation_export: int = default_num_processes, folder_with_segs_from_prev_stage: str = None, num_parts: int = 1, part_id: int = 0): """ This is nnU-Net's default function for making predictions. It works best for batch predictions (predicting many images at once). """ if isinstance(output_folder_or_list_of_truncated_output_files, str): output_folder = output_folder_or_list_of_truncated_output_files elif isinstance(output_folder_or_list_of_truncated_output_files, list): output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0]) else: output_folder = None ######################## # let's store the input arguments so that its clear what was used to generate the prediction if output_folder is not None: my_init_kwargs = {} for k in inspect.signature(self.predict_from_files).parameters.keys(): my_init_kwargs[k] = locals()[k] my_init_kwargs = deepcopy( my_init_kwargs) # let's not unintentionally change anything in-place. Take this as a recursive_fix_for_json_export(my_init_kwargs) maybe_mkdir_p(output_folder) save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json')) # we need these two if we want to do things with the predictions like for example apply postprocessing save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False) ####################### # check if we need a prediction from the previous stage if self.configuration_manager.previous_stage_name is not None: assert folder_with_segs_from_prev_stage is not None, \ f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \ f'stage ({self.configuration_manager.previous_stage_name}) as input. 
Please provide the folder where' \ f' they are located via folder_with_segs_from_prev_stage' # sort out input and output filenames list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ self._manage_input_and_output_lists(list_of_lists_or_source_folder, output_folder_or_list_of_truncated_output_files, folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, save_probabilities) if len(list_of_lists_or_source_folder) == 0: return data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, seg_from_prev_stage_files, output_filename_truncated, num_processes_preprocessing) return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) def _internal_get_data_iterator_from_lists_of_filenames(self, input_list_of_lists: List[List[str]], seg_from_prev_stage_files: Union[List[str], None], output_filenames_truncated: Union[List[str], None], num_processes: int): return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, output_filenames_truncated, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing) # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) # # hijack batchgenerators, yo # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This # # way we don't have to reinvent the wheel here. # num_processes = max(1, min(num_processes, len(input_list_of_lists))) # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, # output_filenames_truncated, self.plans_manager, self.dataset_json, # self.configuration_manager, num_processes) # if num_processes == 0: # mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images)) pp = preprocessing_iterator_fromnpy( list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing ) return pp def predict_from_list_of_npy_arrays(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3, 
save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): iterator = self.get_data_iterator_from_raw_npy_data(image_or_list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, num_processes) return self.predict_from_data_iterator(iterator, save_probabilities, num_processes_segmentation_export) def predict_from_data_iterator(self, data_iterator, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): """ each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys! If 'ofile' is None, the result will be returned instead of written to a file """ with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool: worker_list = [i for i in export_pool._pool] r = [] for preprocessed in data_iterator: data = preprocessed['data'] if isinstance(data, str): delfile = data data = torch.from_numpy(np.load(data)) os.remove(delfile) ofile = preprocessed['ofile'] if ofile is not None: print(f'\nPredicting {os.path.basename(ofile)}:') else: print(f'\nPredicting image of shape {data.shape}:') print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}') properties = preprocessed['data_properties'] # let's not get into a runaway situation where the GPU predicts so fast that the disk has to b swamped with # npy files proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) while not proceed: # print('sleeping') sleep(0.1) proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) prediction = self.predict_logits_from_preprocessed_data(data).cpu() if ofile is not None: # this needs to go into background processes # export_prediction_from_logits(prediction, properties, configuration_manager, plans_manager, # dataset_json, ofile, save_probabilities) print('sending off prediction to background worker for resampling and export') r.append( export_pool.starmap_async( export_prediction_from_logits, ((prediction, properties, self.configuration_manager, self.plans_manager, self.dataset_json, ofile, save_probabilities),) ) ) else: # convert_predicted_logits_to_segmentation_with_correct_shape(prediction, plans_manager, # configuration_manager, label_manager, # properties, # save_probabilities) print('sending off prediction to background worker for resampling') r.append( export_pool.starmap_async( convert_predicted_logits_to_segmentation_with_correct_shape, ( (prediction, self.plans_manager, self.configuration_manager, self.label_manager, properties, save_probabilities),) ) ) if ofile is not None: print(f'done with {os.path.basename(ofile)}') else: print(f'\nDone with image of shape {data.shape}:') ret = [i.get()[0] for i in r] if isinstance(data_iterator, MultiThreadedAugmenter): data_iterator._finish() # clear lru cache compute_gaussian.cache_clear() # clear device cache
empty_cache(self.device)
11
2023-12-04 19:43:14+00:00
16k
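The record above bundles repository context entries (each carrying an "identifier", a "path", and a "snippet"), a block of file-level imports, a truncated code body, a single held-out source line, an integer pointing at one context entry, a timestamp, and a size bucket. A minimal Python sketch of how such a record might be inspected follows; the top-level field names used here ("context", "gold_snippet_index", "next_line", and so on) and the JSON-lines load step are assumptions for illustration, not a documented API.

import json

def describe_record(record: dict) -> None:
    # Context entries are dicts with "identifier", "path", and "snippet" keys,
    # as in the record above; the top-level field names are assumed here.
    context = record.get("context", [])
    gold_index = record.get("gold_snippet_index")  # assumed name for the bare integer shown above
    print(f"repo: {record.get('repo_name')}  file: {record.get('file_path')}")
    print(f"context snippets: {len(context)}")
    if isinstance(gold_index, int) and 0 <= gold_index < len(context):
        gold = context[gold_index]
        print(f"gold snippet #{gold_index}: {gold['identifier']} ({gold['path']})")
    # The held-out target is a single source line continuing the truncated code body.
    print(f"held-out next line: {record.get('next_line')!r}")

# Usage sketch, assuming records are stored one JSON object per line:
# with open("records.jsonl") as handle:
#     for line in handle:
#         describe_record(json.loads(line))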
Zuricho/chroma_pipeline
chroma/models/graph_classifier.py
[ { "identifier": "validate_XC", "path": "chroma/data/xcs.py", "snippet": "def validate_XCS(all_atom=None, sequence=True):\n def decorator(func):\n def new_func(*args, **kwargs):" }, { "identifier": "basic", "path": "chroma/layers/basic.py", "snippet": "class NoOp(nn.Module):\nclass Transpose(nn.Module):\nclass Unsqueeze(nn.Module):\nclass OneHot(nn.Module):\nclass MeanEmbedding(nn.Module):\nclass PeriodicPositionalEncoding(nn.Module):\nclass PositionWiseFeedForward(nn.Module):\nclass DropNormLin(nn.Module):\nclass ResidualLinearLayer(nn.Module):\nclass TriangleMultiplication(nn.Module):\nclass NodeProduct(nn.Module):\nclass FourierFeaturization(nn.Module):\nclass PositionalEncoding(nn.Module):\nclass MaybeOnehotEmbedding(nn.Embedding):\n def __init__(self):\n def forward(self, x, **kwargs):\n def __init__(self, d1=1, d2=2):\n def forward(self, x):\n def __init__(self, dim=1):\n def forward(self, x):\n def __init__(self, n_tokens):\n def forward(self, x):\n def __init__(self, embedding, use_softmax=True):\n def forward(self, x):\n def __init__(self, d_model, max_seq_len=4000, dropout=0.0):\n def forward(self, x):\n def __init__(self, d_model, d_hidden, dropout=0.1):\n def reset_parameters(self):\n def forward(self, x):\n def __init__(\n self, in_features, out_features, norm_type=\"ln\", dropout=0.0, actn=nn.ReLU()\n ):\n def forward(self, x, input_mask=None):\n def __init__(self, d_model, use_norm=True):\n def forward(self, x):\n def __init__(self, d_model=512, mode=\"outgoing\"):\n def forward(self, X, mask=None):\n def __init__(self, d_in, d_out):\n def forward(self, node_features, node_mask=None, edge_mask=None):\n def __init__(self, d_input, d_model, trainable=False, scale=1.0):\n def forward(self, inputs):\n def __init__(self, d_model, d_input=1, period_range=(1.0, 1000.0)):\n def forward(self, inputs):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n A = self.left_edge_mlp(h)\n B = self.right_edge_mlp(h)\n G = self.skip(h)\n A = A.masked_fill(~mask, 0.0)\n B = B.masked_fill(~mask, 0.0)\n B = 2 * math.pi * scale * torch.randn(d_input, d_model // 2)" }, { "identifier": "AttentionChainPool", "path": "chroma/layers/attention.py", "snippet": "class AttentionChainPool(nn.Module):\n \"\"\"Pools residue-based representations to chain-based representations using a chain mask and attention.\n Args:\n n_head (int): number of attention heads\n d_model (int): dimension of embeddings to be pooled\n\n Inputs:\n h (torch.tensor): of size (batch_size, sequence_length, d_model)\n C (torch.tensor): of size (batch_size, sequence_length)\n\n Outputs:\n output (torch.tensor): of size (batch_size, n_chains, d_model)\n chain_mask (torch.tensor): of size (batch_size, n_chains)\n \"\"\"\n\n def __init__(self, n_head, d_model):\n super().__init__()\n self.attention = MultiHeadAttention(\n n_head, d_model, d_model, d_model, dropout=0.0\n )\n\n def get_query(self, x):\n return torch.ones(x.size(0), 1, x.size(2)).type(x.dtype).to(x.device)\n\n def forward(self, h, C):\n bs, num_res = C.size()\n chains = C.abs().unique()\n chains = (\n chains[chains > 0].unsqueeze(-1).repeat(1, bs).reshape(-1).unsqueeze(-1)\n )\n num_chains = len(chains.unique())\n\n h_repeat = h.repeat(num_chains, 1, 1)\n C_repeat = C.repeat(num_chains, 1)\n mask = (C_repeat == chains).unsqueeze(-2)\n\n output, _ = self.attention(\n self.get_query(h_repeat), h_repeat, h_repeat, mask=mask\n )\n output = torch.cat(output.split(bs), 1)\n chain_mask = torch.stack(mask.squeeze(1).any(dim=-1).split(bs), -1)\n return output, chain_mask" }, { 
"identifier": "NodeProduct", "path": "chroma/layers/basic.py", "snippet": "class NodeProduct(nn.Module):\n \"\"\"Like Alg. 10 in Jumper et al. (2021) but instead of computing a mean over MSA dimension,\n process for single-sequence inputs.\n Args:\n d_in (int): dimension of node embeddings (inputs)\n d_out (int): dimension of edge embeddings (outputs)\n\n Inputs:\n node_features (torch.tensor): of size (batch_size, nres, d_model)\n node_mask (torch.tensor): of size (batch_size, nres)\n edge_mask (torch.tensor): of size (batch_size, nres, nres)\n\n Outputs:\n edge_features (torch.tensor): of size (batch_size, nres, nres, d_model)\n \"\"\"\n\n def __init__(self, d_in, d_out):\n super().__init__()\n self.layer_norm = nn.LayerNorm(d_in)\n self.left_lin = nn.Linear(d_in, d_in)\n self.right_lin = nn.Linear(d_in, d_in)\n self.edge_lin = nn.Linear(2 * d_in, d_out)\n\n def forward(self, node_features, node_mask=None, edge_mask=None):\n _, nres, _ = node_features.size()\n\n node_features = self.layer_norm(node_features)\n left_embs = self.left_lin(node_features)\n right_embs = self.right_lin(node_features)\n\n if node_mask is not None:\n mask = node_mask[:, :, None]\n left_embs = left_embs.masked_fill(~mask, 0.0)\n right_embs = right_embs.masked_fill(~mask, 0.0)\n\n left_embs = left_embs[:, None, :, :].repeat(1, nres, 1, 1)\n right_embs = right_embs[:, :, None, :].repeat(1, 1, nres, 1)\n edge_features = torch.cat([left_embs, right_embs], dim=-1)\n edge_features = self.edge_lin(edge_features)\n\n if edge_mask is not None:\n mask = edge_mask[:, :, :, None]\n edge_features = edge_features.masked_fill(~mask, 0.0)\n\n return edge_features" }, { "identifier": "NoOp", "path": "chroma/layers/basic.py", "snippet": "class NoOp(nn.Module):\n \"\"\"A dummy nn.Module wrapping an identity operation.\n\n Inputs:\n x (any)\n\n Outputs:\n x (any)\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def forward(self, x, **kwargs):\n return x" }, { "identifier": "MLP", "path": "chroma/layers/graph.py", "snippet": "class MLP(nn.Module):\n \"\"\"Multilayer perceptron with variable input, hidden, and output dims.\n\n Args:\n dim_in (int): Feature dimension of input tensor.\n dim_hidden (int or None): Feature dimension of intermediate layers.\n Defaults to matching output dimension.\n dim_out (int or None): Feature dimension of output tensor.\n Defaults to matching input dimension.\n num_layers_hidden (int): Number of hidden MLP layers.\n activation (str): MLP nonlinearity.\n `'relu'`: Rectified linear unit.\n `'softplus'`: Softplus.\n dropout (float): Dropout rate. 
Default is 0.\n\n Inputs:\n h (torch.Tensor): Input tensor with shape `(..., dim_in)`\n\n Outputs:\n h (torch.Tensor): Input tensor with shape `(..., dim_in)`\n \"\"\"\n\n def __init__(\n self,\n dim_in: int,\n dim_hidden: Optional[int] = None,\n dim_out: Optional[int] = None,\n num_layers_hidden: int = 1,\n activation: str = \"relu\",\n dropout: float = 0.0,\n ):\n super(MLP, self).__init__()\n\n # Default is dimension preserving\n dim_out = dim_out if dim_out is not None else dim_in\n dim_hidden = dim_hidden if dim_hidden is not None else dim_out\n\n nonlinearites = {\"relu\": nn.ReLU, \"softplus\": nn.Softplus}\n activation_func = nonlinearites[activation]\n\n if num_layers_hidden == 0:\n layers = [nn.Linear(dim_in, dim_out)]\n else:\n layers = []\n for i in range(num_layers_hidden):\n d_1 = dim_in if i == 0 else dim_hidden\n layers = layers + [\n nn.Linear(d_1, dim_hidden),\n activation_func(),\n nn.Dropout(dropout),\n ]\n layers = layers + [nn.Linear(dim_hidden, dim_out)]\n self.layers = nn.Sequential(*layers)\n\n def forward(self, h: torch.Tensor) -> torch.Tensor:\n return self.layers(h)" }, { "identifier": "MaskedNorm", "path": "chroma/layers/graph.py", "snippet": "class MaskedNorm(nn.Module):\n \"\"\"Masked normalization layer.\n\n Args:\n dim (int): Dimensionality of the normalization. Can be 1 for 1D\n normalization along dimension 1 or 2 for 2D normalization along\n dimensions 1 and 2.\n num_features (int): Channel dimension; only needed if `affine` is True.\n affine (bool): If True, inclde a learnable affine transformation\n post-normalization. Default is False.\n norm (str): Type of normalization, can be `instance`, `layer`, or\n `transformer`.\n eps (float): Small number for numerical stability.\n\n Inputs:\n data (torch.Tensor): Input tensor with shape\n `(num_batch, num_nodes, num_channels)` (1D) or\n `(num_batch, num_nodes, num_nodes, num_channels)` (2D).\n mask (torch.Tensor): Mask tensor with shape\n `(num_batch, num_nodes)` (1D) or\n `(num_batch, num_nodes, num_nodes)` (2D).\n\n Outputs:\n norm_data (torch.Tensor): Mask-normalized tensor with shape\n `(num_batch, num_nodes, num_channels)` (1D) or\n `(num_batch, num_nodes, num_nodes, num_channels)` (2D).\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n num_features: int = -1,\n affine: bool = False,\n norm: str = \"instance\",\n eps: float = 1e-5,\n ):\n super(MaskedNorm, self).__init__()\n\n self.norm_type = norm\n self.dim = dim\n self.norm = norm + str(dim)\n self.affine = affine\n self.eps = eps\n\n # Dimension to sum\n if self.norm == \"instance1\":\n self.sum_dims = [1]\n elif self.norm == \"layer1\":\n self.sum_dims = [1, 2]\n elif self.norm == \"transformer1\":\n self.sum_dims = [-1]\n elif self.norm == \"instance2\":\n self.sum_dims = [1, 2]\n elif self.norm == \"layer2\":\n self.sum_dims = [1, 2, 3]\n elif self.norm == \"transformer2\":\n self.sum_dims = [-1]\n else:\n raise NotImplementedError\n\n # Number of features, only required if affine\n self.num_features = num_features\n\n # Affine transformation is a linear layer on the C channel\n if self.affine:\n self.weights = nn.Parameter(torch.rand(self.num_features))\n self.bias = nn.Parameter(torch.zeros(self.num_features))\n\n def forward(\n self, data: torch.Tensor, mask: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n # Add optional trailing singleton dimension and expand if necessary\n if mask is not None:\n if len(mask.shape) == len(data.shape) - 1:\n mask = mask.unsqueeze(-1)\n if data.shape != mask.shape:\n mask = mask.expand(data.shape)\n\n # 
Input shape is Batch, Channel, Dim1, (dim2 if 2d)\n dims = self.sum_dims\n if (mask is None) or (self.norm_type == \"transformer\"):\n mask_mean = data.mean(dim=dims, keepdim=True)\n mask_std = torch.sqrt(\n (((data - mask_mean)).pow(2)).mean(dim=dims, keepdim=True) + self.eps\n )\n\n # Norm\n norm_data = (data - mask_mean) / mask_std\n\n else:\n # Zeroes vector to sum all mask data\n norm_data = torch.zeros_like(data).to(data.device).type(data.dtype)\n for mask_id in mask.unique():\n # Skip zero, since real mask\n if mask_id == 0:\n continue\n\n # Transform mask to temp mask that match mask id\n tmask = (mask == mask_id).type(torch.float32)\n\n # Sum mask for mean\n mask_sum = tmask.sum(dim=dims, keepdim=True)\n\n # Data is tmask, so that mean is only for unmasked pos\n mask_mean = (data * tmask).sum(dim=dims, keepdim=True) / mask_sum\n mask_std = torch.sqrt(\n (((data - mask_mean) * tmask).pow(2)).sum(dim=dims, keepdim=True)\n / mask_sum\n + self.eps\n )\n\n # Calculate temp norm, apply mask\n tnorm = ((data - mask_mean) / mask_std) * tmask\n # Sometime mask is empty, so generate nan that are conversted to 0\n tnorm[tnorm != tnorm] = 0\n\n # Add to init zero norm data\n norm_data += tnorm\n\n # Apply affine\n if self.affine:\n norm_data = norm_data * self.weights + self.bias\n\n # If mask, apply mask\n if mask is not None:\n norm_data = norm_data * (mask != 0).type(data.dtype)\n return norm_data" }, { "identifier": "diffusion", "path": "chroma/layers/structure/diffusion.py", "snippet": "class GaussianNoiseSchedule:\nclass NoiseTimeEmbedding(nn.Module):\nclass DiffusionChainCov(nn.Module):\nclass ReconstructionLosses(nn.Module):\n def __init__(\n self, log_snr_range: Tuple[float, float] = (-7.0, 13.5), kind: str = \"log_snr\",\n ) -> None:\n def t_map(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def derivative(self, t: torch.Tensor, func: Callable) -> torch.Tensor:\n def tensor_check(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha_func(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma_func(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha_deriv(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma_deriv(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def beta(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def g(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def log_SNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def compute_t_range(self, log_snr: Union[float, torch.Tensor]) -> torch.Tensor:\n def SNR_derivative(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SSNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SSNR_inv(self, ssnr: torch.Tensor) -> torch.Tensor:\n def SSNR_inv_deriv(self, ssnr: Union[float, torch.Tensor]) -> torch.Tensor:\n def prob_SSNR(self, ssnr: Union[float, torch.Tensor]) -> torch.Tensor:\n def linear_logsnr_grid(self, N: int, tspan: Tuple[float, float]) -> torch.Tensor:\n def __init__(\n self,\n dim_embedding: int,\n noise_schedule: GaussianNoiseSchedule,\n rff_scale: float = 0.8,\n feature_type: str = \"log_snr\",\n ) -> None:\n def forward(\n self, t: torch.Tensor, log_alpha: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n def __init__(\n self,\n log_snr_range: Tuple[float, float] = (-7.0, 13.5),\n noise_schedule: str = \"log_snr\",\n 
sigma_translation: float = 1.0,\n covariance_model: str = \"brownian\",\n complex_scaling: bool = False,\n **kwargs,\n ) -> None:\n def sample_t(\n self,\n C: torch.LongTensor,\n t: Optional[torch.Tensor] = None,\n inverse_CDF: Optional[Callable] = None,\n ) -> torch.Tensor:\n def sde_forward(self, X, C, t, Z=None):\n def _schedule_coefficients(\n self,\n t: torch.Tensor,\n inverse_temperature: float = 1.0,\n langevin_isothermal: bool = True,\n ) -> Tuple[\n def langevin(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n ):\n def reverse_sde(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n ):\n def ode(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n detach_X0: bool = True,\n ):\n def energy(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.Tensor,\n t: torch.Tensor,\n detach_X0: bool = True,\n align_X0: bool = True,\n ) -> torch.Tensor:\n def score(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.Tensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n detach_X0: bool = True,\n align_X0: bool = True,\n U_traj: List = [],\n ) -> torch.Tensor:\n def elbo(self, X0_pred, X0, C, t):\n def pseudoelbo(self, loss_per_residue, C, t):\n def _baoab_sample_step(\n self,\n _x,\n p,\n C,\n t,\n dt,\n score_func,\n gamma=2.0,\n kT=1.0,\n n_equil=1,\n ode_boost=True,\n langevin_isothermal=False,\n ):\n def baoab_step(_x, p, t):\n def ode_step(t, _x):\n def sample_sde(\n self,\n X0_func: Callable,\n C: torch.LongTensor,\n X_init: Optional[torch.Tensor] = None,\n conditioner: Optional[Callable] = None,\n N: int = 100,\n tspan: Tuple[float, float] = (1.0, 0.001),\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n sde_func: str = \"reverse_sde\",\n integrate_func: str = \"euler_maruyama\",\n initialize_noise: bool = True,\n remap_time: bool = False,\n remove_drift_translate: bool = False,\n remove_noise_translate: bool = False,\n align_X0: bool = True,\n ) -> Dict[str, torch.Tensor]:\n def _X0_func(_X, _C, t):\n def sdefun(_t, _X):\n def estimate_pseudoelbo_X(\n self,\n X0_func,\n X,\n C,\n num_samples=50,\n deterministic_seed=0,\n return_elbo_t=False,\n noise=True,\n ):\n def _score_direct(\n self, Xt, X0_func, C, t, align_X0=True,\n ):\n def estimate_logp(\n self,\n X0_func: Callable,\n X_sample: torch.Tensor,\n C: torch.LongTensor,\n N: int,\n return_trace_t: bool = False,\n ):\n def divergence(fn, x, t):\n def flow_gradient(\n X, X0_func, C, t,\n ):\n def odefun(_t, _X):\n def estimate_elbo(\n self,\n X0_func: Callable,\n X: torch.Tensor,\n C: torch.LongTensor,\n num_samples: int = 50,\n deterministic_seed: int = 0,\n return_elbo_t: bool = False,\n grad_logprob_Y_func: Optional[Callable] = None,\n ) -> torch.Tensor:\n def conditional_X0(\n self, X0: 
torch.Tensor, score: torch.Tensor, C: torch.tensor, t: torch.Tensor\n ) -> torch.Tensor:\n def _mean(self, X, C, alpha):\n def _X_to_Z(self, X_sample, X, C, alpha, sigma):\n def _Z_to_X(self, Z, X, C, alpha, sigma):\n def sample_conditional(\n self, X: torch.Tensor, C: torch.LongTensor, t: torch.Tensor, s: torch.Tensor\n ) -> torch.Tensor:\n def forward(\n self, X: torch.Tensor, C: torch.LongTensor, t: Optional[torch.Tensor] = None\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n def __init__(\n self,\n diffusion: DiffusionChainCov,\n loss_scale: float = 10.0,\n rmsd_method: str = \"symeig\",\n ):\n def _batch_average(self, loss, C):\n def _loss_elbo(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_rmsd(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_pseudoelbo(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_fragment(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_pair(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_neighborhood(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_distance(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_hbonds(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def estimate_metrics(\n self,\n X0_func: Callable,\n X: torch.Tensor,\n C: torch.LongTensor,\n num_samples: int = 50,\n deterministic_seed: int = 0,\n use_noise: bool = True,\n return_samples: bool = False,\n tspan: Tuple[float] = (1e-4, 1.0),\n ):\n def forward(\n self,\n X0_pred: torch.Tensor,\n X: torch.Tensor,\n C: torch.LongTensor,\n t: torch.Tensor,\n ):\ndef _debug_viz_gradients(\n pml_file, X_list, dX_list, C, S, arrow_length=2.0, name=\"gradient\", color=\"red\"\n):\ndef _debug_viz_XZC(X, Z, C, rgb=True):\n SNR = self.log_SNR(t).exp()\n SNR = self.alpha(t).pow(2) / (self.sigma(t).pow(2))\n Z = torch.randn_like(X)\n Z = Z.reshape(X.shape[0], -1, 3)\n R_Z = self.base_gaussian._multiply_R(Z, C).reshape(X.shape)\n X = backbone.center_X(X, C)\n Z = torch.randn_like(X) if Z is None else Z\n Z = torch.randn_like(X) if Z is None else Z\n X = backbone.center_X(X, C)\n X = backbone.impute_masked_X(X, C)\n X0 = X0_func(X, C, t=t)\n X0 = X0_func(X, C, t=t)\n X0, _ = self.loss_rmsd.align(X0, X, C, align_unmasked=True)\n X0 = X0.detach()\n Z = self._X_to_Z(X, X0, C, alpha, sigma)\n X = backbone.impute_masked_X(X, C)\n X = X.detach().clone()\n X0 = backbone.impute_masked_X(X0, C)\n Z = torch.randn_like(_x)\n _X0 = X0_func(_X, _C, t)\n T = np.linspace(1e-4, 1.0, num_samples)\n X0 = X0_func(Xt, C, t)\n X0, _ = self.loss_rmsd.align(X0, Xt, C, align_unmasked=True)\n C = C.abs()\n X = backbone.impute_masked_X(X, C)\n T = np.linspace(1e-4, 1.0, num_samples)\n X = backbone.impute_masked_X(X, C)\n Z = self.base_gaussian._multiply_R_inverse(X_noise, C)\n X = backbone.impute_masked_X(X, C)\n X = backbone.center_X(X, C)\n X = backbone.impute_masked_X(X, C)\n T = np.linspace(1e-4, 1.0, num_samples)\n X = X.reshape(X.shape[0], -1, 3)\n Z = Z.reshape(Z.shape[0], -1, 3)\n C = C_expand.reshape(C.shape[0], -1)\n N = X.shape[1]" }, { "identifier": "BackboneEncoderGNN", "path": "chroma/models/graph_design.py", "snippet": "class BackboneEncoderGNN(nn.Module):\n \"\"\"Graph Neural Network for processing protein structure into graph embeddings.\n\n Args:\n See documention of `structure.protein_graph.ProteinFeatureGraph`,\n and `graph.GraphNN` for more details.\n\n dim_nodes (int): Hidden dimension of node tensors.\n dim_edges (int): Hidden dimension of edge tensors.\n num_neighbors (int): Number of neighbors per 
nodes.\n node_features (tuple): List of node feature specifications. Features\n can be given as strings or as dictionaries.\n edge_features (tuple): List of edge feature specifications. Features\n can be given as strings or as dictionaries.\n num_layers (int): Number of layers.\n node_mlp_layers (int): Number of hidden layers for node update\n function.\n node_mlp_dim (int, optional): Dimension of hidden layers for node update\n function, defaults to match output dimension.\n edge_update (bool): Whether to include an edge update step.\n edge_mlp_layers (int): Number of hidden layers for edge update\n function.\n edge_mlp_dim (int, optional): Dimension of hidden layers for edge update\n function, defaults to match output dimension.\n skip_connect_input (bool): Whether to include skip connections between\n layers.\n mlp_activation (str): MLP nonlinearity function, `relu` or `softplus`\n accepted.\n dropout (float): Dropout fraction.\n graph_distance_atom_type (int): Atom type for computing residue-residue\n distances for graph construction. Negative values will specify\n centroid across atom types. Default is `-1` (centroid).\n graph_cutoff (float, optional): Cutoff distance for graph construction:\n mask any edges further than this cutoff. Default is `None`.\n graph_mask_interfaces (bool): Restrict connections only to within\n chains, excluding-between chain interactions. Default is `False`.\n graph_criterion (str): Method used for building graph from distances.\n Currently supported methods are `{knn, random_log, random_linear}`.\n Default is `knn`.\n graph_random_min_local (int): Minimum number of neighbors in GNN that\n come from local neighborhood, before random neighbors are chosen.\n checkpoint_gradients (bool): Switch to implement gradient checkpointing\n during training.\n\n Inputs:\n X (torch.Tensor): Backbone coordinates with shape\n `(num_batch, num_residues, num_atoms, 3)`.\n C (torch.LongTensor): Chain map with shape `(num_batch, num_residues)`.\n node_h_aux (torch.LongTensor, optional): Auxiliary node features with\n shape `(num_batch, num_residues, dim_nodes)`.\n edge_h_aux (torch.LongTensor, optional): Auxiliary edge features with\n shape `(num_batch, num_residues, num_neighbors, dim_edges)`.\n edge_idx (torch.LongTensor, optional): Input edge indices for neighbors\n with shape `(num_batch, num_residues, num_neighbors)`.\n mask_ij (torch.Tensor, optional): Input edge mask with shape\n `(num_batch, num_nodes, num_neighbors)`.\n\n Outputs:\n node_h (torch.Tensor): Node features with shape\n `(num_batch, num_residues, dim_nodes)`.\n edge_h (torch.Tensor): Edge features with shape\n `(num_batch, num_residues, num_neighbors, dim_edges)`.\n edge_idx (torch.LongTensor): Edge indices for neighbors with shape\n `(num_batch, num_residues, num_neighbors)`.\n mask_i (torch.Tensor): Node mask with shape `(num_batch, num_residues)`.\n mask_ij (torch.Tensor): Edge mask with shape\n `(num_batch, num_nodes, num_neighbors)`.\n \"\"\"\n\n def __init__(\n self,\n dim_nodes: int = 128,\n dim_edges: int = 128,\n num_neighbors: int = 30,\n node_features: tuple = ((\"internal_coords\", {\"log_lengths\": True}),),\n edge_features: tuple = (\n \"distances_2mer\",\n \"orientations_2mer\",\n \"distances_chain\",\n ),\n num_layers: int = 3,\n node_mlp_layers: int = 1,\n node_mlp_dim: Optional[int] = None,\n edge_update: bool = True,\n edge_mlp_layers: int = 1,\n edge_mlp_dim: Optional[int] = None,\n skip_connect_input: bool = False,\n mlp_activation: str = \"softplus\",\n dropout: float = 0.1,\n 
graph_distance_atom_type: int = -1,\n graph_cutoff: Optional[float] = None,\n graph_mask_interfaces: bool = False,\n graph_criterion: str = \"knn\",\n graph_random_min_local: int = 20,\n checkpoint_gradients: bool = False,\n **kwargs\n ) -> None:\n \"\"\"Initialize BackboneEncoderGNN.\"\"\"\n super(BackboneEncoderGNN, self).__init__()\n\n # Save configuration in kwargs\n self.kwargs = locals()\n self.kwargs.pop(\"self\")\n for key in list(self.kwargs.keys()):\n if key.startswith(\"__\") and key.endswith(\"__\"):\n self.kwargs.pop(key)\n args = SimpleNamespace(**self.kwargs)\n\n # Important global options\n self.dim_nodes = dim_nodes\n self.dim_edges = dim_edges\n self.checkpoint_gradients = checkpoint_gradients\n\n graph_kwargs = {\n \"distance_atom_type\": args.graph_distance_atom_type,\n \"cutoff\": args.graph_cutoff,\n \"mask_interfaces\": args.graph_mask_interfaces,\n \"criterion\": args.graph_criterion,\n \"random_min_local\": args.graph_random_min_local,\n }\n\n self.feature_graph = protein_graph.ProteinFeatureGraph(\n dim_nodes=args.dim_nodes,\n dim_edges=args.dim_edges,\n num_neighbors=args.num_neighbors,\n graph_kwargs=graph_kwargs,\n node_features=args.node_features,\n edge_features=args.edge_features,\n )\n\n self.gnn = graph.GraphNN(\n dim_nodes=args.dim_nodes,\n dim_edges=args.dim_edges,\n num_layers=args.num_layers,\n node_mlp_layers=args.node_mlp_layers,\n node_mlp_dim=args.node_mlp_dim,\n edge_update=args.edge_update,\n edge_mlp_layers=args.edge_mlp_layers,\n edge_mlp_dim=args.edge_mlp_dim,\n mlp_activation=args.mlp_activation,\n dropout=args.dropout,\n norm=\"transformer\",\n scale=args.num_neighbors,\n skip_connect_input=args.skip_connect_input,\n checkpoint_gradients=checkpoint_gradients,\n )\n\n @validate_XC(all_atom=False)\n def forward(\n self,\n X: torch.Tensor,\n C: torch.LongTensor,\n node_h_aux: Optional[torch.Tensor] = None,\n edge_h_aux: Optional[torch.Tensor] = None,\n edge_idx: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ) -> Tuple[\n torch.Tensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.Tensor\n ]:\n \"\"\"Encode XC backbone structure into node and edge features.\"\"\"\n num_batch, num_residues = C.shape\n\n # Hack to enable checkpointing\n if self.checkpoint_gradients and (not X.requires_grad):\n X.requires_grad = True\n\n node_h, edge_h, edge_idx, mask_i, mask_ij = self._checkpoint(\n self.feature_graph, X, C, edge_idx, mask_ij\n )\n\n if node_h_aux is not None:\n node_h = node_h + mask_i.unsqueeze(-1) * node_h_aux\n if edge_h_aux is not None:\n edge_h = edge_h + mask_ij.unsqueeze(-1) * edge_h_aux\n\n node_h, edge_h = self.gnn(node_h, edge_h, edge_idx, mask_i, mask_ij)\n return node_h, edge_h, edge_idx, mask_i, mask_ij\n\n def _checkpoint(self, module: nn.Module, *args) -> nn.Module:\n if self.checkpoint_gradients:\n return checkpoint(module, *args)\n else:\n return module(*args)" }, { "identifier": "load_model", "path": "chroma/utility/model.py", "snippet": "def load_model(\n weights,\n model_class,\n device=\"cpu\",\n strict=False,\n strict_unexpected=True,\n verbose=True,\n):\n \"\"\"Load model saved with save_model.\n\n Args:\n weights (str): The destination path of the model weights to load.\n Compatible with files saved by `save_model`.\n model_class: Name of model class.\n device (str, optional): Pytorch device specification, e.g. `'cuda'` for\n GPU. 
Default is `'cpu'`.\n strict (bool): Whether to require that the keys match between the\n input file weights and the model created from the parameters stored\n in the model kwargs.\n strict_unexpected (bool): Whether to require that there are no\n unexpected keys when loading model weights, as distinct from the\n strict option which doesn't allow for missing keys either. By\n default, we use this option rather than strict for ease of\n development when adding model features.\n verbose (bool, optional): Show outputs from download and loading. Default True.\n\n Returns:\n model (nn.Module): Torch model with loaded weights.\n \"\"\"\n\n # Process weights path\n if str(weights).startswith(\"named:\"):\n weights = weights.split(\"named:\")[1]\n if weights not in NAMED_MODELS[model_class.__name__]:\n raise Exception(f\"Unknown {model_class.__name__} model name: {weights},\")\n weights = NAMED_MODELS[model_class.__name__][weights][\"s3_uri\"]\n\n # resolve s3 paths\n if str(weights).startswith(\"s3:\"):\n raise NotImplementedError(\"Loading Models from an S3 link not supported.\")\n\n # download public models from generate\n if str(weights).startswith(\"https:\"):\n # Decompose into arguments\n parsed_url = urlparse(weights)\n base_url = f\"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}\"\n model_name = parse_qs(parsed_url.query).get(\"weights\", [None])[0]\n weights = api.download_from_generate(\n base_url, model_name, force=False, exist_ok=True\n )\n\n # load model weights\n params = torch.load(weights, map_location=\"cpu\")\n model = model_class(**params[\"init_kwargs\"]).to(device)\n missing_keys, unexpected_keys = model.load_state_dict(\n params[\"model_state_dict\"], strict=strict\n )\n if strict_unexpected and len(unexpected_keys) > 0:\n raise Exception(\n f\"Error loading model from checkpoint file: {weights} contains {len(unexpected_keys)} unexpected keys: {unexpected_keys}\"\n )\n return model" } ]
from types import SimpleNamespace
from chroma.data.xcs import validate_XC
from chroma.layers import basic
from chroma.layers.attention import AttentionChainPool
from chroma.layers.basic import NodeProduct, NoOp
from chroma.layers.graph import MLP, MaskedNorm
from chroma.layers.structure import diffusion
from chroma.models.graph_design import BackboneEncoderGNN
from chroma.utility.model import load_model as utility_load_model
import torch
import torch.nn as nn
10,850
if "random_fourier_2mer" in args.edge_features: index = args.edge_features.index("random_fourier_2mer") args.edge_features.pop(index) args.edge_features.append( ( "random_fourier_2mer", { "dim_embedding": args.dim_edges, "trainable": False, "scale": args.fourier_scale, }, ) ) # Encoder GNN process backbone self.encoder = BackboneEncoderGNN( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, num_neighbors=args.num_neighbors, node_features=args.node_features, edge_features=args.edge_features, num_layers=args.num_layers, node_mlp_layers=args.node_mlp_layers, node_mlp_dim=args.node_mlp_dim, edge_update=args.edge_update, edge_mlp_layers=args.edge_mlp_layers, edge_mlp_dim=args.edge_mlp_dim, mlp_activation=args.mlp_activation, dropout=args.dropout, skip_connect_input=args.skip_connect_input, graph_criterion=args.graph_criterion, graph_random_min_local=args.graph_random_min_local, checkpoint_gradients=checkpoint_gradients, ) self.time_feature_type = args.time_feature_type self.time_log_feature_scaling = time_log_feature_scaling self.use_time_features = use_time_features if self.use_time_features: self.time_features = basic.FourierFeaturization( d_input=1, d_model=dim_nodes, trainable=False, scale=16.0 ) self.sequence_embedding = nn.Embedding(20, dim_nodes) self.noise_perturb = diffusion.DiffusionChainCov( noise_schedule=args.noise_schedule, beta_min=args.noise_beta_min, beta_max=args.noise_beta_max, log_snr_range=args.noise_log_snr_range, covariance_model=args.noise_covariance_model, ) self._init_heads(class_config, dim_nodes, out_mlp_layers, dropout) self.condition_sequence_frequency = 0.3 def _init_heads(self, class_config, dim_nodes, out_mlp_layers, dropout): self.heads = {"chain": {}, "first_order": {}, "second_order": {}, "complex": {}} for label, config in class_config.items(): group = config["level"] if label == "is_interface" or label == "contact": dim_out = 1 else: dim_out = len(config["tokens"]) if group == "chain": pool = AttentionChainPool(8, dim_nodes) elif group == "complex": raise NotImplementedError elif group == "second_order": pool = NoOp() else: pool = NoOp() if group != "second_order": if self.zero_grad_fix: node_norm_layer = MaskedNorm( dim=1, num_features=dim_nodes, affine=True, norm="layer" ) mlp = MLP( dim_nodes, dim_hidden=None, dim_out=dim_out, num_layers_hidden=out_mlp_layers, activation=self.mlp_activation, dropout=dropout, ) head = nn.Sequential(node_norm_layer, mlp) else: mlp = MLP( dim_nodes, dim_hidden=None, dim_out=dim_out, num_layers_hidden=out_mlp_layers, activation="relu", dropout=dropout, ) head = mlp else: head = nn.Sequential(nn.Linear(dim_nodes, 16), NodeProduct(16, 1)) self.heads[group][label] = head, pool self.add_module(f"{label}_head", head) if pool is not None: self.add_module(f"{label}_pool", pool) def _time_features(self, t): h = { "t": lambda: t, "log_snr": lambda: self.noise_perturb.noise_schedule.log_SNR(t), }[self.time_feature_type]() if "log" in self.time_feature_type: h = self.time_log_feature_scaling * h time_h = self.time_features(h[:, None, None]) return time_h
# Copyright Generate Biomedicines, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Models for generating protein sequence and side chain conformations given backbones. These can be used for sequence design and packing. """ class GraphClassifier(nn.Module): """Graph-based protein classification Args: See documention of `structure.protein_graph.ProteinFeatureGraph`, and `graph.GraphNN` for more details. Inputs: X (Tensor): Backbone coordinates with shape `(num_batch, num_residues, num_atoms, 3)`. C (LongTensor): Chain map with shape `(num_batch, num_residues)`. O (Tensor) (optional): One-hot sequence tensor of shape `(num_batch, num_residues)` Outputs: node_h (Tensor): residue-based representations that can be used to project various classification predictions """ def __init__( self, dim_nodes=128, dim_edges=128, num_neighbors=30, node_features=(("internal_coords", {"log_lengths": True}),), edge_features=["random_fourier_2mer", "orientations_2mer", "distances_chain"], num_layers=3, dropout=0.1, node_mlp_layers=1, node_mlp_dim=None, edge_update=True, edge_mlp_layers=1, edge_mlp_dim=None, skip_connect_input=False, mlp_activation="softplus", graph_criterion="knn", graph_random_min_local=20, use_time_features=True, noise_schedule="log_snr", noise_beta_min=0.2, noise_beta_max=70.0, checkpoint_gradients=False, class_config={}, out_mlp_layers=2, noise_covariance_model="globular", noise_log_snr_range=(-7.0, 13.5), time_feature_type="t", time_log_feature_scaling=0.05, fourier_scale=16.0, zero_grad_fix=False, **kwargs, ): """Initialize GraphBackbone network.""" super().__init__() # Save configuration in kwargs self.kwargs = locals() self.kwargs.pop("self") for key in list(self.kwargs.keys()): if key.startswith("__") and key.endswith("__"): self.kwargs.pop(key) args = SimpleNamespace(**self.kwargs) self.class_config = class_config # Important global options self.dim_nodes = args.dim_nodes self.dim_edges = args.dim_edges self.mlp_activation = args.mlp_activation self.zero_grad_fix = zero_grad_fix if "random_fourier_2mer" in args.edge_features: index = args.edge_features.index("random_fourier_2mer") args.edge_features.pop(index) args.edge_features.append( ( "random_fourier_2mer", { "dim_embedding": args.dim_edges, "trainable": False, "scale": args.fourier_scale, }, ) ) # Encoder GNN process backbone self.encoder = BackboneEncoderGNN( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, num_neighbors=args.num_neighbors, node_features=args.node_features, edge_features=args.edge_features, num_layers=args.num_layers, node_mlp_layers=args.node_mlp_layers, node_mlp_dim=args.node_mlp_dim, edge_update=args.edge_update, edge_mlp_layers=args.edge_mlp_layers, edge_mlp_dim=args.edge_mlp_dim, mlp_activation=args.mlp_activation, dropout=args.dropout, skip_connect_input=args.skip_connect_input, graph_criterion=args.graph_criterion, graph_random_min_local=args.graph_random_min_local, checkpoint_gradients=checkpoint_gradients, ) self.time_feature_type = args.time_feature_type self.time_log_feature_scaling = 
time_log_feature_scaling self.use_time_features = use_time_features if self.use_time_features: self.time_features = basic.FourierFeaturization( d_input=1, d_model=dim_nodes, trainable=False, scale=16.0 ) self.sequence_embedding = nn.Embedding(20, dim_nodes) self.noise_perturb = diffusion.DiffusionChainCov( noise_schedule=args.noise_schedule, beta_min=args.noise_beta_min, beta_max=args.noise_beta_max, log_snr_range=args.noise_log_snr_range, covariance_model=args.noise_covariance_model, ) self._init_heads(class_config, dim_nodes, out_mlp_layers, dropout) self.condition_sequence_frequency = 0.3 def _init_heads(self, class_config, dim_nodes, out_mlp_layers, dropout): self.heads = {"chain": {}, "first_order": {}, "second_order": {}, "complex": {}} for label, config in class_config.items(): group = config["level"] if label == "is_interface" or label == "contact": dim_out = 1 else: dim_out = len(config["tokens"]) if group == "chain": pool = AttentionChainPool(8, dim_nodes) elif group == "complex": raise NotImplementedError elif group == "second_order": pool = NoOp() else: pool = NoOp() if group != "second_order": if self.zero_grad_fix: node_norm_layer = MaskedNorm( dim=1, num_features=dim_nodes, affine=True, norm="layer" ) mlp = MLP( dim_nodes, dim_hidden=None, dim_out=dim_out, num_layers_hidden=out_mlp_layers, activation=self.mlp_activation, dropout=dropout, ) head = nn.Sequential(node_norm_layer, mlp) else: mlp = MLP( dim_nodes, dim_hidden=None, dim_out=dim_out, num_layers_hidden=out_mlp_layers, activation="relu", dropout=dropout, ) head = mlp else: head = nn.Sequential(nn.Linear(dim_nodes, 16), NodeProduct(16, 1)) self.heads[group][label] = head, pool self.add_module(f"{label}_head", head) if pool is not None: self.add_module(f"{label}_pool", pool) def _time_features(self, t): h = { "t": lambda: t, "log_snr": lambda: self.noise_perturb.noise_schedule.log_SNR(t), }[self.time_feature_type]() if "log" in self.time_feature_type: h = self.time_log_feature_scaling * h time_h = self.time_features(h[:, None, None]) return time_h
next_line:     @validate_XC()
gold_snippet_index: 0
created_at: 2023-11-28 00:09:40+00:00
level: 16k

repo_name: BiQiWHU/CMFormer
file_path: train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = 
T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert 
self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "mask2former/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "mask2former/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
import_statement:
from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from mask2former import (
    COCOInstanceNewBaselineDatasetMapper,
    COCOPanopticNewBaselineDatasetMapper,
    InstanceSegEvaluator,
    MaskFormerInstanceDatasetMapper,
    MaskFormerPanopticDatasetMapper,
    MaskFormerSemanticDatasetMapper,
    SemanticSegmentorWithTTA,
    add_maskformer2_config,
)
import warnings
import copy
import itertools
import logging
import os
import torch
import detectron2.utils.comm as comm
token_num: 11,205
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
next_line:             mapper = MaskFormerSemanticDatasetMapper(cfg, True)
gold_snippet_index: 5
created_at: 2023-11-29 15:26:53+00:00
level: 16k

repo_name: PopicLab/insilicoSV
file_path: test/test_processing.py
[ { "identifier": "SV_Simulator", "path": "insilicosv/simulate.py", "snippet": "class SV_Simulator:\n def __init__(self, par_file, log_file=None):\n \"\"\"\n par_file: file location to configuration file (.yaml)\n log_file: location to store log file with diagnostic information if config parameters indicate so\n \"\"\"\n global time_start\n print(\"Setting up Simulator...\")\n\n self.formatter = FormatterIO(par_file)\n self.formatter.yaml_to_var_list()\n config = self.formatter.config\n self.ref_file = config['sim_settings']['reference']\n self.ref_fasta = FastaFile(self.ref_file)\n self.svs_config = config['variant_sets']\n\n self.sim_settings = config['sim_settings']\n if log_file and \"generate_log_file\" in self.sim_settings.keys():\n logging.basicConfig(filename=log_file, filemode=\"w\", level=logging.DEBUG,\n format='[%(name)s: %(levelname)s - %(asctime)s] %(message)s')\n self.log_to_file(\"YAML Configuration: {}\".format(config))\n\n # get all chromosome ids\n self.order_ids = self.ref_fasta.references\n self.len_dict = dict() # stores mapping with key = chromosome, value = chromosome length\n for id in self.order_ids:\n chrom_len = self.ref_fasta.get_reference_length(id)\n if 'filter_small_chr' in self.sim_settings and chrom_len < self.sim_settings['filter_small_chr']:\n print(\"Filtering chromosome {}: Length of {} below threshold of {}\".format(id, chrom_len, self.sim_settings['filter_small_chr']))\n else:\n self.len_dict[id] = chrom_len\n print(\"Length of chromosome {}: {}\".format(id, self.len_dict[id]))\n\n # initialize stats file to be generated after all edits and exporting are finished\n self.stats = StatsCollection(self.order_ids, self.len_dict)\n\n self.mode = \"randomized\"\n self.vcf_path = None\n if \"vcf_path\" in self.svs_config[0]:\n self.mode = \"fixed\"\n self.vcf_path = self.svs_config[0][\"vcf_path\"]\n\n self.svs = []\n self.event_ranges = defaultdict(list)\n\n if \"avoid_intervals\" in config:\n # extract {chrom: [(start, end)]} intervals from vcf, add intervals from vcf to event range\n self.extract_vcf_event_intervals(config[\"avoid_intervals\"])\n\n self.overlap_events = None if \"overlap_events\" not in config.keys() \\\n else utils.OverlapEvents(config, allow_chroms=self.order_ids)\n\n self.initialize_svs()\n\n print(\"Finished Setting up Simulator in {} seconds\\n\".format(time.time() - time_start))\n time_start = time.time()\n\n def __repr__(self):\n return \"All structural variants entered into simulator: {}\".format(self.svs)\n\n def log_to_file(self, info, key=\"DEBUG\"):\n # only logs to file if config setting indicates so\n key_to_func = {\"DEBUG\": logging.debug, \"WARNING\": logging.warning}\n if \"generate_log_file\" in self.sim_settings and self.sim_settings[\"generate_log_file\"]:\n key_to_func[key](info)\n\n def get_rand_chr(self, check_size=None, fixed_chrom=None):\n # random assignment of SV to a chromosome (unless we have a predetermined chromosome for this event)\n valid_chrs = self.order_ids\n if check_size is not None:\n valid_chrs = [chrom for chrom, chr_size in self.len_dict.items() if chr_size >= check_size]\n if len(valid_chrs) == 0:\n raise Exception(\"SVs are too big for the reference!\")\n rand_id = valid_chrs[random.randint(0, len(valid_chrs) - 1)] if fixed_chrom is None else fixed_chrom\n chr_len = self.len_dict[rand_id]\n chr_event_ranges = self.event_ranges[rand_id]\n assert rand_id is not None\n return rand_id, chr_len, chr_event_ranges\n\n def extract_vcf_event_intervals(self, vcf_path):\n vcf = VariantFile(vcf_path)\n for 
rec in vcf.fetch():\n self.event_ranges[rec.chrom].append((rec.start, rec.stop))\n\n def process_vcf(self, vcf_path):\n # process vcf containing SVs to be added (deterministically) to reference\n active_svs_total = 0\n time_start_local = 0\n vcf = VariantFile(vcf_path)\n for rec in vcf.fetch():\n svtype = Variant_Type(rec.info['SVTYPE']) if 'SVTYPE' in rec.info else Variant_Type(rec.id)\n self.event_ranges[rec.chrom].append((rec.start, rec.stop))\n sv = Structural_Variant(sv_type=svtype, mode='fixed', vcf_rec=rec, ref_fasta=self.ref_fasta)\n self.svs.append(sv)\n active_svs_total += 1\n self.log_to_file(\"Intervals {} added to Chromosome \\\"{}\\\"\".format(self.event_ranges[rec.chrom], rec.chrom))\n time_dif = time.time() - time_start_local\n print(\"{} SVs successfully placed ========== {} seconds\".format(active_svs_total, time_dif), end=\"\\r\")\n time_start_local = time.time()\n\n def initialize_svs(self):\n \"\"\"\n Creates Structural_Variant objects for every SV to simulate and decides zygosity\n self.mode: flag indicating whether SVs are to be randomly generated or read in from VCF\n self.vcf_path: optional path that will be used if mode==\"fixed\"\n \"\"\"\n if self.mode == \"randomized\":\n for sv_config in self.svs_config:\n for num in range(sv_config[\"number\"]):\n # logic for placing events at intervals given in overlap bed file:\n # for the first (sv_config[\"num_overlap\"]) events, instantiate the SV at the next valid repeat elt interval\n repeat_elt = None\n elt_type = None\n if self.overlap_events is not None:\n sv_config_identifier = utils.get_sv_config_identifier(sv_config)\n if sv_config_identifier in self.overlap_events.svtype_overlap_counts.keys():\n repeat_elt, retrieved_type, elt_type = self.overlap_events.get_single_element_interval(\n sv_config_identifier, sv_config, partial_overlap=False)\n elif sv_config_identifier in self.overlap_events.svtype_partial_overlap_counts.keys():\n repeat_elt, retrieved_type, elt_type = self.overlap_events.get_single_element_interval(\n sv_config_identifier, sv_config, partial_overlap=True)\n elif sv_config_identifier in self.overlap_events.svtype_alu_mediated_counts.keys():\n repeat_elt, retrieved_type = self.overlap_events.get_alu_mediated_interval(sv_config_identifier)\n if sv_config['type'] == Variant_Type.SNP:\n sv = Structural_Variant(sv_type=sv_config[\"type\"], mode=self.mode, length_ranges=[(1, 1)])\n else:\n sv = Structural_Variant(sv_type=sv_config[\"type\"], mode=self.mode,\n length_ranges=sv_config[\"length_ranges\"], source=sv_config[\"source\"],\n target=sv_config[\"target\"],\n overlap_event=(repeat_elt + (retrieved_type if elt_type in ['ALL', None] else elt_type,) if repeat_elt is not None else None),\n div_prob=(None if 'divergence_prob' not in sv_config.keys() else sv_config['divergence_prob']))\n\n # For divergent repeat simulation, need div_dDUP to be homozygous\n if self.sim_settings.get(\"homozygous_only\", False) or random.randint(0, 1):\n sv.ishomozygous = Zygosity.HOMOZYGOUS\n sv.hap = [True, True]\n else:\n sv.ishomozygous = Zygosity.HETEROZYGOUS\n sv.hap = random.choice([[True, False], [False, True]])\n\n self.svs.append(sv)\n if not self.sim_settings[\"prioritize_top\"]:\n random.shuffle(self.svs)\n else: # mode == \"fixed\"\n self.process_vcf(self.vcf_path)\n\n def produce_variant_genome(self, fasta1_out, fasta2_out, ins_fasta, bedfile, stats_file=None, initial_reset=True,\n verbose=False, export_to_file=True):\n \"\"\"\n initial_reset: boolean to indicate if output file should be overwritten (True) or 
appended to (False)\n stats_file: whether a stats file summarizing SVs simulated will be generated in same directory the reference genome is located in\n \"\"\"\n global time_start\n if initial_reset:\n utils.reset_file(fasta1_out)\n utils.reset_file(fasta2_out)\n ref_fasta = self.ref_fasta\n self.apply_transformations(ref_fasta)\n print(\"Finished SV placements and transformations in {} seconds\".format(time.time() - time_start))\n time_start = time.time()\n active_svs = [sv for sv in self.svs if sv.active]\n print(\"Starting Export Process...\")\n for x in range(2):\n edits_dict = dict()\n for id in self.order_ids:\n edits_dict[id] = []\n if x == 0:\n fasta_out = fasta1_out\n elif x == 1:\n fasta_out = fasta2_out\n for sv in active_svs:\n if sv.hap[x]:\n for frag in sv.changed_fragments:\n edits_dict[frag[0]].append(frag[1:])\n for id in edits_dict:\n edits_dict[id].sort()\n self.event_ranges[id].sort()\n self.log_to_file(\"Event Ranges: {}\".format(self.event_ranges))\n self.log_to_file(\"Intervals for hap {}: {}\".format(x, edits_dict))\n for id in self.order_ids:\n edits_x = edits_dict[id]\n utils.fail_if_any_overlapping(edits_x)\n self.formatter.export_variants_to_fasta(id, edits_x, fasta_out, ref_fasta, verbose=verbose)\n print(\"ID {} exported to fasta file {} in {} seconds\".format(id, fasta_out, time.time() - time_start))\n time_start = time.time()\n if export_to_file:\n self.formatter.export_to_bedpe(active_svs, bedfile, ins_fasta=ins_fasta, reset_file=initial_reset)\n self.formatter.export_to_vcf(active_svs, self.stats, vcffile=bedfile[:-4]+'.vcf')\n if stats_file:\n self.stats.get_info(self.svs)\n self.stats.export_data(stats_file)\n\n def choose_rand_pos(self, svs, ref_fasta, verbose=False):\n \"\"\"\n randomly positions SVs and stores reference fragments in SV events\n\n svs: list of Structural Variant objects\n ref_fasta: FastaFile with access to reference file\n \"\"\"\n active_svs_total = 0\n inactive_svs_total = 0\n time_start_local = time.time()\n for sv in svs:\n tries = 0\n valid = False\n while not valid:\n tries += 1\n valid = True\n if tries > self.sim_settings[\"max_tries\"]:\n if self.sim_settings[\"fail_if_placement_issues\"]:\n raise Exception(\n \"Failed to simulate {}, {} / {} SVs successfully simulated (set fail_if_placement_issues \"\n \"to False to override placement failures)\".format(\n sv, active_svs_total, len(svs)))\n valid = False\n break\n rand_id, chr_len, chr_event_ranges = self.get_rand_chr(check_size=sv.req_space,\n fixed_chrom=(None if sv.overlap_event is None\n else sv.overlap_event[0]))\n if not (sv.dispersion_flip and sv.overlap_event is not None):\n # if an overlap event is given, need to find the SV start position based on which fragment has been\n # set to the overlap event interval\n if sv.overlap_event is not None:\n start_pos = 0\n for frag in sv.source_events[::-1]:\n if frag.start is not None:\n start_pos = frag.start\n else:\n start_pos -= frag.length\n else:\n start_pos = random.randint(0, chr_len - sv.req_space)\n # define the space in which SV operates\n new_intervals = [] # tracks new ranges of blocks\n sv.start, sv.start_chr = start_pos, rand_id\n sv.end = sv.start + sv.req_space\n block_start = sv.start\n else:\n # to assign event \"A\" to a repeat interval in a flipped dispersion event, need to\n # anchor the sv to the end of \"A\" and get the start position by subtracting off the total size\n end_pos = int(sv.overlap_event[2])\n start_pos = end_pos - sv.req_space\n new_intervals = []\n sv.start, sv.start_chr = start_pos, 
rand_id\n sv.end = end_pos\n block_start = sv.start\n\n for sv_event in sv.source_events:\n sv_event.start, sv_event.end = start_pos, start_pos + sv_event.length\n sv_event.source_chr = rand_id\n frag = ref_fasta.fetch(rand_id, sv_event.start, sv_event.end)\n sv_event.source_frag = frag\n start_pos += sv_event.length\n\n if sv_event.symbol.startswith(Symbols.DIS.value):\n if utils.is_overlapping(chr_event_ranges, (block_start, sv_event.start)):\n valid = False\n break\n new_intervals.append((block_start, sv_event.start))\n block_start = sv_event.end\n elif utils.percent_N(frag) > 0.05:\n valid = False\n break\n # catches the last (and perhaps only) block in sequence\n if utils.is_overlapping(chr_event_ranges, (block_start, sv.end)):\n valid = False\n continue\n else:\n new_intervals.append((block_start, sv.end))\n\n # adds new SV to simulate only if chosen positions were valid\n if valid:\n active_svs_total += 1\n sv.active = True\n self.log_to_file(\"Intervals {} added to Chromosome \\\"{}\\\" for SV {}\".format(new_intervals, rand_id, sv))\n chr_event_ranges.extend(new_intervals)\n # populates insertions with random sequence - these event symbols only show up in target transformation\n for event in sv.events_dict.values():\n if event.source_frag is None and event.length > 0:\n event.source_frag = utils.generate_seq(event.length)\n sv.assign_locations(sv.start)\n else:\n inactive_svs_total += 1\n if tries != self.sim_settings[\"max_tries\"] + 1:\n self.log_to_file(\"{} only got {} tries instead of the max {}\".format(sv, tries, self.sim_settings[\n \"max_tries\"] + 1), key=\"WARNING\")\n\n time_dif = time.time() - time_start_local\n print(\n \"{} / {} SVs successfully placed ========== {} / {} SVs unsuccessfully placed, {} tries, {} seconds\".format(\n active_svs_total, len(svs), inactive_svs_total, len(svs), tries, time_dif), end=\"\\r\")\n time_start_local = time.time()\n\n def apply_transformations(self, ref_fasta):\n \"\"\"\n Randomly chooses positions for all SVs and carries out all edits\n Populates event classes within SVs with reference fragments and start & end positions\n Stores list of changes, which each have an interval and a sequence to substitute the reference frag with, in SV\n\n ref_fasta: FastaFile with access to reference\n mode: flag indicating whether we're adding SVs to the reference in a randomized or deterministic way\n \"\"\"\n if self.mode == \"randomized\":\n # select random positions for SVs\n self.choose_rand_pos(self.svs, ref_fasta)\n print()\n\n total = 0\n for sv in self.svs:\n if sv.active:\n sv.change_fragment()\n total += 1\n self.log_to_file(\"Events Dict after all edits: {} \".format(sv.events_dict))\n\n def close(self):\n self.ref_fasta.close()" }, { "identifier": "FormatterIO", "path": "insilicosv/processing.py", "snippet": "class FormatterIO:\n def __init__(self, par_file):\n self.bedpe_counter = 1\n self.par_file = par_file\n self.config = None\n\n @staticmethod\n def run_checks_randomized(config):\n \"\"\"\n check method for yaml given with SVs given for randomized placement on reference\n \"\"\"\n config_svs = config['variant_sets']\n for config_sv in config_svs:\n if \"avoid_intervals\" in config_sv:\n continue\n elif \"type\" not in config_sv:\n raise Exception(\"\\\"Type\\\" attribute must be specified! 
For custom transformations, enter in \\\"Custom\\\"\")\n elif config_sv[\"type\"] == \"SNP\": # SNP events are only specified by count (size is deterministic)\n if \"number\" in config_sv and isinstance(config_sv[\"number\"], int) and config_sv[\"number\"] > 0:\n continue\n else:\n raise Exception(\"Number (of type int > 0) is a required parameter for all SVs\")\n if \"min_length\" not in config_sv:\n raise Exception(\"Min length must be specified on all SVs!\")\n if \"max_length\" not in config_sv:\n raise Exception(\"Max length must be specified on all SVs!\")\n if \"number\" not in config_sv:\n raise Exception(\"Number is a required parameter for all SVs\")\n\n elif \"type\" in config_sv and not isinstance(config_sv[\"type\"], str):\n raise Exception(\"Invalid {} type for SV \\'type\\' attribute, str expected\".format(type(config_sv[\"type\"])))\n valid_optional_par = [\"fail_if_placement_issues\", \"max_tries\", \"generate_log_file\", \"filter_small_chr\",\n \"prioritize_top\", \"homozygous_only\", \"reference\"] # valid arguments within sim_settings\n for parameter in config['sim_settings']:\n if parameter not in valid_optional_par:\n raise Exception(\"\\\"{}\\\" is an invalid argument under sim_settings\".format(parameter))\n valid_keys = [\"sim_settings\", \"variant_sets\", \"overlap_events\", \"avoid_intervals\"] # valid arguments at the top level\n for key in config:\n if key not in valid_keys:\n raise Exception(\"Unknown argument \\\"{}\\\"\".format(key))\n\n def postproc_config_dict(self):\n if 'sim_settings' not in self.config.keys():\n raise Exception(\"Must include \\'sim_settings\\' sections specifying at least \\'reference\\' path\")\n if \"filter_small_chr\" in self.config.keys() and not isinstance(self.config[\"filter_small_chr\"], int):\n raise Exception(\"Must provide value of type int to \\'filter_small_chr\\'\")\n if \"reference\" not in self.config[\"sim_settings\"]:\n raise Exception(\"Must include reference FASTA file in \\'reference\\' field of \\'sim_settings\\'\")\n elif self.config[\"sim_settings\"][\"reference\"].split(\".\")[-1] not in [\"fa\", \"fna\", \"fasta\"]:\n raise Exception(\"Input reference must be of type .fa, .fna, or .fasta\")\n if \"vcf_path\" not in self.config[\"variant_sets\"][0]:\n self.run_checks_randomized(self.config)\n for config_sv in self.config['variant_sets']:\n if \"vcf_path\" in config_sv:\n continue\n # SV event length specification - not applicable for SNPs\n if config_sv[\"type\"] != \"SNP\":\n if not isinstance(config_sv[\"min_length\"], list) or not isinstance(config_sv[\"max_length\"], list):\n raise Exception(\"Must provide entries of type list to \\'min_length\\' and \\'max_length\\'\")\n else:\n config_sv[\"length_ranges\"] = list(zip(config_sv[\"min_length\"], config_sv[\"max_length\"]))\n assert all(max_len >= min_len >= 0 for (min_len, max_len) in config_sv[\"length_ranges\"]), \"Max length must be >= min length for all SVs! 
Also ensure that all length values are >= 0.\"\n if \"divergence_prob\" in config_sv:\n if config_sv[\"type\"] != \"DIVERGENCE\":\n raise Exception(\"divergence_prob can only be given for event type DIVERGENCE\")\n else:\n assert isinstance(config_sv[\"divergence_prob\"], int) or isinstance(config_sv[\"divergence_prob\"], float), \\\n \"Must give \\'divergence_prob\\'\"\n assert 1 >= config_sv[\"divergence_prob\"] > 0, \"divergence_prob must be in (0,1]\"\n\n config_sv[\"type\"] = Variant_Type(config_sv[\"type\"])\n if config_sv[\"type\"] != Variant_Type.Custom:\n config_sv[\"source\"] = None\n config_sv[\"target\"] = None\n\n # setting default values for sim_settings fields\n if 'max_tries' not in self.config['sim_settings']:\n self.config['sim_settings']['max_tries'] = 50\n if 'fail_if_placement_issues' not in self.config['sim_settings']:\n self.config['sim_settings']['fail_if_placement_issues'] = False\n\n def yaml_to_var_list(self):\n try:\n with open(self.par_file) as yaml_file:\n self.config = yaml.full_load(yaml_file)\n except:\n raise Exception(\"YAML File {} failed to be open\".format(self.par_file))\n self.postproc_config_dict()\n\n def write_to_file(self, sv, bedfile, source_s, source_e, target_s, target_e, transform, event, sv_id):\n assert (not event.symbol.startswith(Symbols.DIS.value))\n if transform == Operations.INS.value:\n transform_length = event.length\n else:\n transform_length = source_e - source_s\n if event.length > 0:\n with open(bedfile, \"a\") as fout:\n row = [str(event.source_chr),\n str(source_s),\n str(source_e),\n str(event.source_chr),\n str(target_s),\n str(target_e),\n transform,\n str(transform_length),\n '%d/%d' % (int(sv.hap[0]), int(sv.hap[1])),\n sv.name,\n str(sv_id)]\n fout.write(\"\\t\".join(row) + \"\\n\")\n\n @staticmethod\n def symbol_is_inversion(symbol):\n return any(c.islower() for c in symbol)\n\n @staticmethod\n def export_insertions(chr, start_pos, seq, ins_fasta):\n \"\"\"\n Exports foreign insertion sequences to separate fasta file, append only\n \"\"\"\n with open(ins_fasta, \"a\") as fout_ins:\n fout_ins.write(\">{}_{}\\n\".format(chr, start_pos))\n fout_ins.write(\"{}\\n\".format(seq))\n\n @staticmethod\n def get_event_target_operation(ev, target_events_dict, source_events_dict):\n \"\"\"\n determines target interval and operation for multi-source events\n \"\"\"\n # A -> A'\n if ev + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), \\\n Operations.DUP.value if ev in target_events_dict.keys() else Operations.TRA.value\n # A -> a'\n elif ev.lower() + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev.lower() + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INVDUP.value\n # A -> a\n elif ev.lower() in target_events_dict.keys():\n trg_sym = ev.lower()\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INV.value\n # A -> A* (in the case of a custom event in which an event is divergently duplicated)\n elif ev + Symbols.DIV.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DIV.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.DIV.value\n # A -> A (insertion if source A is undefined, identity otherwise)\n elif ev in target_events_dict.keys():\n return (target_events_dict[ev].start, target_events_dict[ev].end), \\\n Operations.INS.value if 
source_events_dict[ev].start is None else Operations.IDENTITY.value\n # A -> [none]\n elif ev not in [sym[0] for sym in target_events_dict.keys()]:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.DEL.value\n # otherwise unknown mapping\n else:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.UNDEFINED.value\n\n @staticmethod\n def postprocess_record_params(sv, sv_record_info):\n \"\"\"\n arrange the bed_record parameter dictionaries in order of ascending source interval start position\n and assign order values to the relevant entries\n \"\"\"\n # for TRA/INS/DUP events with the same target position, 'order' describes the order in which they\n # are compiled (i.e., the order in which they appear in the target sequence)\n order = 0\n ins_pos = None\n for block in sv.target_symbol_blocks:\n for target_event in block:\n if target_event.symbol.startswith(Symbols.DIS.value) or \\\n target_event.symbol in sv_record_info.keys(): # <- prevent collision with A' and A if both in target\n continue\n src_sym = target_event.symbol[0].upper()\n if sv_record_info[src_sym]['transform'] in NONZERO_ORDER_OPERATIONS:\n if ins_pos is None:\n ins_pos = sv_record_info[src_sym]['target_s']\n order += 1\n elif sv_record_info[src_sym]['target_s'] == ins_pos:\n order += 1\n else:\n ins_pos = None\n order = 0\n # sv_record_info[src_sym]['order'] = order\n return sorted([params for params in sv_record_info.values()], key=lambda params: params['source_s'])\n\n def export_to_bedpe(self, svs, bedfile, ins_fasta=None, reset_file=True):\n if reset_file:\n utils.reset_file(bedfile)\n if ins_fasta:\n utils.reset_file(ins_fasta)\n for sv_id, sv in enumerate(svs):\n # SVs with multiple source events will be split into multiple bed records (one for each)\n if len(sv.events_dict) == 1:\n ev = list(sv.sv_blocks.target_events_dict.values())[0] if sv.type == Variant_Type.INS\\\n else list(sv.events_dict.values())[0]\n op = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)[1]\n record_info = {'source_s': ev.start, 'source_e': ev.end, 'target_s': ev.start, 'target_e': ev.end,\n 'transform': op, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n self.write_to_file(**record_info)\n if op == Operations.INS.value:\n self.export_insertions(sv.start_chr, ev.start, ev.source_frag, ins_fasta)\n else:\n # multiple source events: source intervals taken from the source events\n # and target intervals taken from corresponding target events (if no match, then deletion)\n sv_record_info = {}\n for ev in sv.events_dict.values():\n if ev.symbol.startswith(Symbols.DIS.value):\n continue\n sv_record_info[ev.symbol] = {'source_s': ev.start, 'source_e': ev.end, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n (target_s, target_e), operation = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)\n sv_record_info[ev.symbol]['target_s'] = target_s\n sv_record_info[ev.symbol]['target_e'] = target_e\n sv_record_info[ev.symbol]['transform'] = operation\n for param_dict in self.postprocess_record_params(sv, sv_record_info):\n self.write_to_file(**param_dict)\n\n def export_to_vcf(self, svs, stats, vcffile):\n with open(vcffile, \"w\") as vcf:\n vcf.write(\"##fileformat=VCFv4.2\\n\")\n for chrm, chrm_len in stats.chr_lengths.items():\n vcf.write(\"##contig=<ID=%s,length=%d>\\n\" % (chrm, chrm_len))\n vcf.write(\"#%s\\n\" % \"\\t\".join([\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", 
\"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\",\n \"SAMPLE\"]))\n # *** This will throw an error with pysam version 0.18, need 0.16.0.1\n vcf_file = pysam.VariantFile(vcffile)\n vcf_file.header.info.add('END', number=1, type='Integer', description=\"End position of the variant \"\n \"described in this record\")\n vcf_file.header.info.add('CIPOS', number=2, type='Integer', description=\"Confidence interval around POS for \"\n \"imprecise variants\")\n vcf_file.header.info.add('CIEND', number=2, type='Integer', description=\"Confidence interval around END for \"\n \"imprecise variants\")\n vcf_file.header.info.add('SVTYPE', number=1, type='String', description=\"Type of structural variant\")\n vcf_file.header.info.add('SVLEN', number=1, type='Integer', description=\"Length of structural variant\")\n vcf_file.header.info.add('SVMETHOD', number=1, type='String', description=\"SV detection method\")\n vcf_file.header.info.add('TARGET', number=1, type='Integer', description=\"Target location for divergent repeat\")\n vcf_file.header.info.add('OVERLAP_EV', number=1, type='String', description=\"Bool. indicator for the event being\"\n \"placed at an overlap_events interval\")\n vcf_file.header.formats.add('GT', number=1, type='String', description=\"Genotype\")\n\n vcf_out_file = pysam.VariantFile(vcffile, 'w', header=vcf_file.header)\n\n for sv in svs:\n zyg = (int(sv.hap[0]), int(sv.hap[1]))\n dispersion_target = None\n if sv.type in DISPERSION_TYPES:\n source_event = sv.events_dict[Symbols.REQUIRED_SOURCE.value]\n disp_event = sv.events_dict['_1']\n rec_start = source_event.start\n rec_end = source_event.end\n if disp_event.start == source_event.end:\n dispersion_target = disp_event.end\n else:\n dispersion_target = disp_event.start\n else:\n rec_start = min([frag[1] for frag in sv.changed_fragments])\n rec_end = max(frag[2] for frag in sv.changed_fragments)\n if dispersion_target is not None:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start, 'TARGET': dispersion_target}\n else:\n if sv.type == Variant_Type.INS:\n # special case of simple INS: sv length \\neq (sv end - sv start)\n # **pysam will delete END fields that are equal to POS, therefore INS records won't have an END\n rec_end += 1\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': sv.events_dict[Symbols.REQUIRED_SOURCE.value].length}\n else:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start}\n if sv.overlap_event is not None:\n info_field['OVERLAP_EV'] = sv.overlap_event[3]\n\n vcf_record = vcf_out_file.header.new_record(contig=sv.start_chr, start=rec_start, stop=rec_end,\n alleles=['N', '<%s>' % sv.type.value], id=sv.type.value,\n info=info_field,\n qual=100, filter='PASS',\n samples=[{'GT': zyg}])\n vcf_out_file.write(vcf_record)\n\n vcf_out_file.close()\n\n def export_variants_to_fasta(self, id, edits, fasta_out, fasta_file, verbose=False):\n \"\"\"\n Exports list of changes from simulator to fasta file\n\n id: chr_id to apply edits to\n edits: list with elements of the form (start, end, new_frag)\n fasta_out: Fasta file to export changes to\n fasta_file: FastaFile with access to reference\n \"\"\"\n with open(fasta_out, \"a\") as fout_export:\n if id not in fasta_file.references:\n raise KeyError(\"ID {} not found in inputted fasta file\".format(id))\n if verbose:\n print(\"New ID: \", id)\n fout_export.write(\">\" + str(id) + \"\\n\")\n chr_variants = list(edits)\n chr_variants.sort()\n chr_variants.append([fasta_file.get_reference_length(id), fasta_file.get_reference_length(id), \"\"])\n pos = 
0\n for variant in chr_variants:\n var_start, var_end = variant[0], variant[1]\n while pos < var_start:\n appropriate_buffer = MAX_BUFFER_SIZE if var_start - pos > MAX_BUFFER_SIZE else var_start - pos\n c = fasta_file.fetch(id, pos, pos + appropriate_buffer)\n fout_export.write(c)\n pos += appropriate_buffer\n assert (pos == var_start), \"Replacement fragment about to be inserted at position {} instead of var_start {}\".format(pos, var_start)\n fout_export.write(variant[2])\n pos = var_end\n fout_export.write(\"\\n\")\n\n def close(self):\n self.fin_export1.close()\n self.fin_export2.close()" }, { "identifier": "NestedDict", "path": "insilicosv/utils.py", "snippet": "class NestedDict(defaultdict):\n def __call__(self):\n return NestedDict(self.default_factory)" }, { "identifier": "utils", "path": "insilicosv/utils.py", "snippet": "class NestedDict(defaultdict):\nclass OverlapEvents:\n def __call__(self):\ndef is_overlapping(event_ranges, addition, called_from_helper=False, strictly_partial=False):\ndef fail_if_any_overlapping(arr):\ndef validate_symbols(source, target):\ndef remove_file(file):\ndef reset_file(filename):\ndef generate_seq(length):\ndef percent_N(seq):\ndef complement(seq):\ndef divergence(seq, divergence_prob=None):\ndef get_sv_config_identifier(sv_config):\n def __init__(self, config, allow_chroms=None):\n def get_num_overlap_counts(self, config):\n def parse_bed_file(self, bed_fname, allow_chroms=None, allow_types=None):\n def get_single_element_interval(self, sv_config_id, sv_config, partial_overlap):\n def populate_alu_pairs(self, svs_config):\n def get_alu_mediated_interval(self, sv_config_id):\n def remove_alu_from_overlap_dict(self, chrom, start, end):\n def midpoint(start, end):\n def get_intrvl_len(chr, st, end):\n def elt_type_is_allowed(self, elt_type):\n def get_partially_overlapping_interval(elt_chrom, elt_start, elt_stop, sv_min, sv_max):\n def draw_from_unif(a, b):\n def decrement_counts(self, sv_config_id, input_elt_type, partial_overlap):\n def __getitem__(self, sv_config_id, minsize, maxsize, elt_type=None, partial_overlap=False):" }, { "identifier": "constants", "path": "insilicosv/constants.py", "snippet": "MAX_BUFFER_SIZE: int = 1000000 # max number of bases that can be read at one time to export to fasta file\n INS = \"INS\"\n DEL = \"DEL\"\n INV = \"INV\"\n DUP = \"DUP\"\n SNP = \"SNP\"\n TRA = \"TRA\"\n DIVERGENCE = \"DIVERGENCE\"\nDISPERSION_TYPES = [Variant_Type.dDUP, Variant_Type.INV_dDUP,\n Variant_Type.TRA, Variant_Type.div_dDUP,\n Variant_Type.dDUP_iDEL, Variant_Type.INS_iDEL]\n INS = \"INS\"\n DUP = \"DUP\"\n INV = \"INV\"\n DEL = \"DEL\"\n TRA = \"TRA\"\n INVDUP = \"INVDUP\"\n INVTRA = \"INVTRA\"\n IDENTITY = \"IDENTITY\"\n UNDEFINED = \"UNDEFINED\"\n DIV = \"DIV\"\nNONZERO_ORDER_OPERATIONS = [Operations.TRA.value, Operations.INS.value, Operations.DUP.value, Operations.INVDUP.value,\n Operations.INVTRA.value, Operations.DIV.value]\n UNDEFINED = -1\n HOMOZYGOUS = 1\n HETEROZYGOUS = 0\n DIS = \"_\" # dispersion event\n DUP = \"'\" # attached to symbols that are not the original one from source sequence\n DIV = \"*\" # divergent interval, attached to symbols that vary from the original by low-probability base error\n REQUIRED_SOURCE = \"A\" # event symbol of the required source/main event all SVs must have\nSV_KEY = {Variant_Type.INS: [(), (\"A\")],\n Variant_Type.SNP: [(\"A\",), (\"A*\",)],\n Variant_Type.DEL: [(\"A\",), ()],\n Variant_Type.INV: [(\"A\",), (\"a\",)],\n Variant_Type.DUP: [(\"A\",), (\"A\", \"A'\")],\n Variant_Type.TRA: [(\"A\", 
\"_\"), (\"_\", \"A'\")],\n Variant_Type.dupINVdup: [(\"A\", \"B\", \"C\"), (\"A\", \"c'\", \"b\", \"a'\", \"C\")],\n Variant_Type.delINVdel: [(\"A\", \"B\", \"C\"), (\"b\",)],\n Variant_Type.delINVdup: [(\"A\", \"B\", \"C\"), (\"c'\", \"b\", \"C\")],\n Variant_Type.dupINVdel: [(\"A\", \"B\", \"C\"), (\"A\", \"b\", \"a'\")],\n Variant_Type.delINV: [(\"A\", \"B\"), (\"b\",)],\n Variant_Type.INVdel: [(\"A\", \"B\"), (\"a\",)],\n Variant_Type.dDUP_iDEL: [(\"A\", \"_\", \"B\"), (\"A\", \"_\", \"A'\")],\n Variant_Type.INS_iDEL: [(\"A\", \"_\", \"B\"), (\"_\", \"A'\")],\n Variant_Type.INVdup: [(\"A\",), (\"a\", \"a'\")],\n Variant_Type.dup_INV: [(\"A\", \"B\"), (\"A\", \"b\", \"a'\")],\n Variant_Type.INV_dup: [(\"A\", \"B\"), (\"b'\", \"a\", \"B\")],\n Variant_Type.dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"A'\")],\n Variant_Type.INV_dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"a'\")],\n Variant_Type.div_dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"A*\")],\n Variant_Type.DIVERGENCE: [(\"A\",), (\"A*\",)]}\nDEFAULT_CONFIG = {\"sim_settings\": {\"max_tries\": 100,\n \"fail_if_placement_issues\": False,\n \"generate_log_file\": False,\n \"prioritize_top\": False},\n \"variant_sets\": {}}\nclass Variant_Type(Enum):\nclass Operations(Enum):\nclass Zygosity(Enum):\nclass Symbols(Enum):" } ]
from insilicosv.simulate import SV_Simulator from insilicosv.processing import FormatterIO from test_simulate import TestObject from pysam import VariantFile, FastaFile from collections import defaultdict, Counter from insilicosv.utils import NestedDict from insilicosv import utils from insilicosv import constants import unittest import sys import os
14,074
"num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 
'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.formatter = FormatterIO(self.par) def tearDown(self): utils.remove_file(self.ins_fasta) utils.remove_file(self.bed) utils.remove_file(self.vcf) utils.remove_file(self.par) def initialize_test(self, test_objects_dict, sv_type, output_type='bed', ins_fasta=None): # function to execute the shared logic for simulating SVs from test objects and generating bed/vcf output config = test_objects_dict[sv_type] config.initialize_files()
class TestProcObject(TestObject): def __init__(self, ref, par, hap1, hap2, bed, vcf): self.vcf = vcf super().__init__(ref, par, hap1, hap2, bed) def extract_bed_records(self): # parse bed record into dict for easy comparison # --> example split bed record: ['chr19', '0', '3', 'chr19', '0', '3', 'DEL', '3', '1/1', 'DEL', '1'] bed_records = [] with open(self.bed) as f: for line in f: ln = line.split() bed_record = {'source_chr': ln[0], 'source_s': ln[1], 'source_e': ln[2], 'target_chr': ln[3], 'target_s': ln[4], 'target_e': ln[5], 'ev_type': ln[6], 'len': ln[7], 'zyg': ln[8], 'parent_type': ln[9], 'sv_id': ln[10]} bed_records.append(bed_record) return bed_records def extract_vcf_records(self): vcf_records = [] vcf = VariantFile(self.vcf) for rec in vcf.fetch(): ln = str(rec).split() # separately parse info field of the form: 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738' info = {field.split('=')[0]: field.split('=')[1] for field in ln[7].split(';')} vcf_record = {'CHROM': ln[0], 'POS': ln[1], 'ID': ln[2], 'REF': ln[3], 'ALT': ln[4], 'QUAL': ln[5], 'FILTER': ln[6], 'INFO': info, 'FORMAT': ln[8], 'SAMPLE': ln[9]} vcf_records.append(vcf_record) return vcf_records class TestProcessing(unittest.TestCase): def setUp(self): # runs before every test self.ref_file = "test/inputs/test.fa" self.par = "test/inputs/par.yaml" self.hap1 = "test/inputs/test1.fa" self.hap2 = "test/inputs/test2.fa" self.bed = "test/inputs/out.bed" self.vcf = "test/inputs/out.vcf" self.ins_fasta = "test/inputs/ins_fasta.fa" self.test_overlap_bed = "test/inputs/example_overlap_events.bed" self.test_overlap_bed_2 = "test/inputs/example_overlap_events_2.bed" # test_overlap_bed_3: events with differing chromosome self.test_overlap_bed_3 = "test/inputs/example_overlap_events_3.bed" self.test_overlap_bed_4 = "test/inputs/example_overlap_events_4.bed" self.test_overlap_bed_11 = "test/inputs/example_overlap_events_11.bed" self.test_objects_simple_events = {'DEL': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DEL", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'DUP': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DUP", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INV", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS': TestProcObject([self.ref_file, {"chr19": "C"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INS", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_flanked_inversions = {'dupINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": 
self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'dupINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dispersions = {'dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'TRA': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "TRA", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_del_inv = {'delINV': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINV", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdel", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_idel = {'dDUP_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INS_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dup_inv = {'dup_INV': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dup_INV", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dup': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dup", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_INVdup = {'INVdup': 
TestProcObject([self.ref_file, {"chr19": "ACTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 1, "max_length": [4], "min_length": [4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_multievent = {'INVdup': TestProcObject([self.ref_file, {"chr19": "ACTGCTAATGCGTTCACTGCTAATGCGTTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 200, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 3, "max_length": [4], "min_length": [2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_overlap_simple = {'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 
1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.formatter = FormatterIO(self.par) def tearDown(self): utils.remove_file(self.ins_fasta) utils.remove_file(self.bed) utils.remove_file(self.vcf) utils.remove_file(self.par) def initialize_test(self, test_objects_dict, sv_type, output_type='bed', ins_fasta=None): # function to execute the shared logic for simulating SVs from test objects and generating bed/vcf output config = test_objects_dict[sv_type] config.initialize_files()
curr_sim = SV_Simulator(config.par)
0
2023-12-01 14:39:20+00:00
16k
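A minimal sketch, assuming each record above is loaded into a Python dict keyed by the schema field names (import_statement, cropped_code, next_line): it shows one plausible way such a record could be scored for next-line prediction. predict_next_line is a hypothetical stand-in for a completion model, the prompt construction from import_statement plus cropped_code is an assumption, and the toy values below are placeholders rather than dataset content.

# Sketch only: score one record for exact-match next-line prediction.
# `record` mirrors the schema fields shown above; `predict_next_line` is a hypothetical model hook.
from typing import Callable, Dict

def evaluate_record(record: Dict[str, str],
                    predict_next_line: Callable[[str], str]) -> bool:
    # Assumed prompt: cross-file import block followed by the in-file cropped context.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    prediction = predict_next_line(prompt)
    # Simplest possible metric: exact match on the stripped predicted line.
    return prediction.strip() == record["next_line"].strip()

if __name__ == "__main__":
    toy_record = {
        "import_statement": "from insilicosv.simulate import SV_Simulator",
        "cropped_code": "config.initialize_files()",
        "next_line": "curr_sim = SV_Simulator(config.par)",
    }
    # Dummy "model" that returns the gold line, just to exercise the scorer.
    print(evaluate_record(toy_record, lambda _prompt: "curr_sim = SV_Simulator(config.par)"))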
BiQiWHU/BWG
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = 
T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert 
self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "mask2former/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "mask2former/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
11,255
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
mapper = MaskFormerPanopticDatasetMapper(cfg, True)
4
2023-11-29 17:15:46+00:00
16k
opisaac9001/TTS-With-ooba-and-voice
TTS/vocoder/models/wavernn.py
[ { "identifier": "plot_spectrogram", "path": "TTS/tts/utils/visual.py", "snippet": "def plot_spectrogram(spectrogram, ap=None, fig_size=(16, 10), output_fig=False):\n if isinstance(spectrogram, torch.Tensor):\n spectrogram_ = spectrogram.detach().cpu().numpy().squeeze().T\n else:\n spectrogram_ = spectrogram.T\n spectrogram_ = spectrogram_.astype(np.float32) if spectrogram_.dtype == np.float16 else spectrogram_\n if ap is not None:\n spectrogram_ = ap.denormalize(spectrogram_) # pylint: disable=protected-access\n fig = plt.figure(figsize=fig_size)\n plt.imshow(spectrogram_, aspect=\"auto\", origin=\"lower\")\n plt.colorbar()\n plt.tight_layout()\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "AudioProcessor", "path": "TTS/utils/audio/processor.py", "snippet": "class AudioProcessor(object):\n \"\"\"Audio Processor for TTS.\n\n Note:\n All the class arguments are set to default values to enable a flexible initialization\n of the class with the model config. They are not meaningful for all the arguments.\n\n Args:\n sample_rate (int, optional):\n target audio sampling rate. Defaults to None.\n\n resample (bool, optional):\n enable/disable resampling of the audio clips when the target sampling rate does not match the original sampling rate. Defaults to False.\n\n num_mels (int, optional):\n number of melspectrogram dimensions. Defaults to None.\n\n log_func (int, optional):\n log exponent used for converting spectrogram aplitude to DB.\n\n min_level_db (int, optional):\n minimum db threshold for the computed melspectrograms. Defaults to None.\n\n frame_shift_ms (int, optional):\n milliseconds of frames between STFT columns. Defaults to None.\n\n frame_length_ms (int, optional):\n milliseconds of STFT window length. Defaults to None.\n\n hop_length (int, optional):\n number of frames between STFT columns. Used if ```frame_shift_ms``` is None. Defaults to None.\n\n win_length (int, optional):\n STFT window length. Used if ```frame_length_ms``` is None. Defaults to None.\n\n ref_level_db (int, optional):\n reference DB level to avoid background noise. In general <20DB corresponds to the air noise. Defaults to None.\n\n fft_size (int, optional):\n FFT window size for STFT. Defaults to 1024.\n\n power (int, optional):\n Exponent value applied to the spectrogram before GriffinLim. Defaults to None.\n\n preemphasis (float, optional):\n Preemphasis coefficient. Preemphasis is disabled if == 0.0. Defaults to 0.0.\n\n signal_norm (bool, optional):\n enable/disable signal normalization. Defaults to None.\n\n symmetric_norm (bool, optional):\n enable/disable symmetric normalization. If set True normalization is performed in the range [-k, k] else [0, k], Defaults to None.\n\n max_norm (float, optional):\n ```k``` defining the normalization range. Defaults to None.\n\n mel_fmin (int, optional):\n minimum filter frequency for computing melspectrograms. Defaults to None.\n\n mel_fmax (int, optional):\n maximum filter frequency for computing melspectrograms. Defaults to None.\n\n pitch_fmin (int, optional):\n minimum filter frequency for computing pitch. Defaults to None.\n\n pitch_fmax (int, optional):\n maximum filter frequency for computing pitch. Defaults to None.\n\n spec_gain (int, optional):\n gain applied when converting amplitude to DB. Defaults to 20.\n\n stft_pad_mode (str, optional):\n Padding mode for STFT. Defaults to 'reflect'.\n\n clip_norm (bool, optional):\n enable/disable clipping the our of range values in the normalized audio signal. 
Defaults to True.\n\n griffin_lim_iters (int, optional):\n Number of GriffinLim iterations. Defaults to None.\n\n do_trim_silence (bool, optional):\n enable/disable silence trimming when loading the audio signal. Defaults to False.\n\n trim_db (int, optional):\n DB threshold used for silence trimming. Defaults to 60.\n\n do_sound_norm (bool, optional):\n enable/disable signal normalization. Defaults to False.\n\n do_amp_to_db_linear (bool, optional):\n enable/disable amplitude to dB conversion of linear spectrograms. Defaults to True.\n\n do_amp_to_db_mel (bool, optional):\n enable/disable amplitude to dB conversion of mel spectrograms. Defaults to True.\n\n do_rms_norm (bool, optional):\n enable/disable RMS volume normalization when loading an audio file. Defaults to False.\n\n db_level (int, optional):\n dB level used for rms normalization. The range is -99 to 0. Defaults to None.\n\n stats_path (str, optional):\n Path to the computed stats file. Defaults to None.\n\n verbose (bool, optional):\n enable/disable logging. Defaults to True.\n\n \"\"\"\n\n def __init__(\n self,\n sample_rate=None,\n resample=False,\n num_mels=None,\n log_func=\"np.log10\",\n min_level_db=None,\n frame_shift_ms=None,\n frame_length_ms=None,\n hop_length=None,\n win_length=None,\n ref_level_db=None,\n fft_size=1024,\n power=None,\n preemphasis=0.0,\n signal_norm=None,\n symmetric_norm=None,\n max_norm=None,\n mel_fmin=None,\n mel_fmax=None,\n pitch_fmax=None,\n pitch_fmin=None,\n spec_gain=20,\n stft_pad_mode=\"reflect\",\n clip_norm=True,\n griffin_lim_iters=None,\n do_trim_silence=False,\n trim_db=60,\n do_sound_norm=False,\n do_amp_to_db_linear=True,\n do_amp_to_db_mel=True,\n do_rms_norm=False,\n db_level=None,\n stats_path=None,\n verbose=True,\n **_,\n ):\n # setup class attributed\n self.sample_rate = sample_rate\n self.resample = resample\n self.num_mels = num_mels\n self.log_func = log_func\n self.min_level_db = min_level_db or 0\n self.frame_shift_ms = frame_shift_ms\n self.frame_length_ms = frame_length_ms\n self.ref_level_db = ref_level_db\n self.fft_size = fft_size\n self.power = power\n self.preemphasis = preemphasis\n self.griffin_lim_iters = griffin_lim_iters\n self.signal_norm = signal_norm\n self.symmetric_norm = symmetric_norm\n self.mel_fmin = mel_fmin or 0\n self.mel_fmax = mel_fmax\n self.pitch_fmin = pitch_fmin\n self.pitch_fmax = pitch_fmax\n self.spec_gain = float(spec_gain)\n self.stft_pad_mode = stft_pad_mode\n self.max_norm = 1.0 if max_norm is None else float(max_norm)\n self.clip_norm = clip_norm\n self.do_trim_silence = do_trim_silence\n self.trim_db = trim_db\n self.do_sound_norm = do_sound_norm\n self.do_amp_to_db_linear = do_amp_to_db_linear\n self.do_amp_to_db_mel = do_amp_to_db_mel\n self.do_rms_norm = do_rms_norm\n self.db_level = db_level\n self.stats_path = stats_path\n # setup exp_func for db to amp conversion\n if log_func == \"np.log\":\n self.base = np.e\n elif log_func == \"np.log10\":\n self.base = 10\n else:\n raise ValueError(\" [!] unknown `log_func` value.\")\n # setup stft parameters\n if hop_length is None:\n # compute stft parameters from given time values\n self.win_length, self.hop_length = millisec_to_length(\n frame_length_ms=self.frame_length_ms, frame_shift_ms=self.frame_shift_ms, sample_rate=self.sample_rate\n )\n else:\n # use stft parameters from config file\n self.hop_length = hop_length\n self.win_length = win_length\n assert min_level_db != 0.0, \" [!] min_level_db is 0\"\n assert (\n self.win_length <= self.fft_size\n ), f\" [!] 
win_length cannot be larger than fft_size - {self.win_length} vs {self.fft_size}\"\n members = vars(self)\n if verbose:\n print(\" > Setting up Audio Processor...\")\n for key, value in members.items():\n print(\" | > {}:{}\".format(key, value))\n # create spectrogram utils\n self.mel_basis = build_mel_basis(\n sample_rate=self.sample_rate,\n fft_size=self.fft_size,\n num_mels=self.num_mels,\n mel_fmax=self.mel_fmax,\n mel_fmin=self.mel_fmin,\n )\n # setup scaler\n if stats_path and signal_norm:\n mel_mean, mel_std, linear_mean, linear_std, _ = self.load_stats(stats_path)\n self.setup_scaler(mel_mean, mel_std, linear_mean, linear_std)\n self.signal_norm = True\n self.max_norm = None\n self.clip_norm = None\n self.symmetric_norm = None\n\n @staticmethod\n def init_from_config(config: \"Coqpit\", verbose=True):\n if \"audio\" in config:\n return AudioProcessor(verbose=verbose, **config.audio)\n return AudioProcessor(verbose=verbose, **config)\n\n ### normalization ###\n def normalize(self, S: np.ndarray) -> np.ndarray:\n \"\"\"Normalize values into `[0, self.max_norm]` or `[-self.max_norm, self.max_norm]`\n\n Args:\n S (np.ndarray): Spectrogram to normalize.\n\n Raises:\n RuntimeError: Mean and variance is computed from incompatible parameters.\n\n Returns:\n np.ndarray: Normalized spectrogram.\n \"\"\"\n # pylint: disable=no-else-return\n S = S.copy()\n if self.signal_norm:\n # mean-var scaling\n if hasattr(self, \"mel_scaler\"):\n if S.shape[0] == self.num_mels:\n return self.mel_scaler.transform(S.T).T\n elif S.shape[0] == self.fft_size / 2:\n return self.linear_scaler.transform(S.T).T\n else:\n raise RuntimeError(\" [!] Mean-Var stats does not match the given feature dimensions.\")\n # range normalization\n S -= self.ref_level_db # discard certain range of DB assuming it is air noise\n S_norm = (S - self.min_level_db) / (-self.min_level_db)\n if self.symmetric_norm:\n S_norm = ((2 * self.max_norm) * S_norm) - self.max_norm\n if self.clip_norm:\n S_norm = np.clip(\n S_norm, -self.max_norm, self.max_norm # pylint: disable=invalid-unary-operand-type\n )\n return S_norm\n else:\n S_norm = self.max_norm * S_norm\n if self.clip_norm:\n S_norm = np.clip(S_norm, 0, self.max_norm)\n return S_norm\n else:\n return S\n\n def denormalize(self, S: np.ndarray) -> np.ndarray:\n \"\"\"Denormalize spectrogram values.\n\n Args:\n S (np.ndarray): Spectrogram to denormalize.\n\n Raises:\n RuntimeError: Mean and variance are incompatible.\n\n Returns:\n np.ndarray: Denormalized spectrogram.\n \"\"\"\n # pylint: disable=no-else-return\n S_denorm = S.copy()\n if self.signal_norm:\n # mean-var scaling\n if hasattr(self, \"mel_scaler\"):\n if S_denorm.shape[0] == self.num_mels:\n return self.mel_scaler.inverse_transform(S_denorm.T).T\n elif S_denorm.shape[0] == self.fft_size / 2:\n return self.linear_scaler.inverse_transform(S_denorm.T).T\n else:\n raise RuntimeError(\" [!] 
Mean-Var stats does not match the given feature dimensions.\")\n if self.symmetric_norm:\n if self.clip_norm:\n S_denorm = np.clip(\n S_denorm, -self.max_norm, self.max_norm # pylint: disable=invalid-unary-operand-type\n )\n S_denorm = ((S_denorm + self.max_norm) * -self.min_level_db / (2 * self.max_norm)) + self.min_level_db\n return S_denorm + self.ref_level_db\n else:\n if self.clip_norm:\n S_denorm = np.clip(S_denorm, 0, self.max_norm)\n S_denorm = (S_denorm * -self.min_level_db / self.max_norm) + self.min_level_db\n return S_denorm + self.ref_level_db\n else:\n return S_denorm\n\n ### Mean-STD scaling ###\n def load_stats(self, stats_path: str) -> Tuple[np.array, np.array, np.array, np.array, Dict]:\n \"\"\"Loading mean and variance statistics from a `npy` file.\n\n Args:\n stats_path (str): Path to the `npy` file containing\n\n Returns:\n Tuple[np.array, np.array, np.array, np.array, Dict]: loaded statistics and the config used to\n compute them.\n \"\"\"\n stats = np.load(stats_path, allow_pickle=True).item() # pylint: disable=unexpected-keyword-arg\n mel_mean = stats[\"mel_mean\"]\n mel_std = stats[\"mel_std\"]\n linear_mean = stats[\"linear_mean\"]\n linear_std = stats[\"linear_std\"]\n stats_config = stats[\"audio_config\"]\n # check all audio parameters used for computing stats\n skip_parameters = [\"griffin_lim_iters\", \"stats_path\", \"do_trim_silence\", \"ref_level_db\", \"power\"]\n for key in stats_config.keys():\n if key in skip_parameters:\n continue\n if key not in [\"sample_rate\", \"trim_db\"]:\n assert (\n stats_config[key] == self.__dict__[key]\n ), f\" [!] Audio param {key} does not match the value used for computing mean-var stats. {stats_config[key]} vs {self.__dict__[key]}\"\n return mel_mean, mel_std, linear_mean, linear_std, stats_config\n\n # pylint: disable=attribute-defined-outside-init\n def setup_scaler(\n self, mel_mean: np.ndarray, mel_std: np.ndarray, linear_mean: np.ndarray, linear_std: np.ndarray\n ) -> None:\n \"\"\"Initialize scaler objects used in mean-std normalization.\n\n Args:\n mel_mean (np.ndarray): Mean for melspectrograms.\n mel_std (np.ndarray): STD for melspectrograms.\n linear_mean (np.ndarray): Mean for full scale spectrograms.\n linear_std (np.ndarray): STD for full scale spectrograms.\n \"\"\"\n self.mel_scaler = StandardScaler()\n self.mel_scaler.set_stats(mel_mean, mel_std)\n self.linear_scaler = StandardScaler()\n self.linear_scaler.set_stats(linear_mean, linear_std)\n\n ### Preemphasis ###\n def apply_preemphasis(self, x: np.ndarray) -> np.ndarray:\n \"\"\"Apply pre-emphasis to the audio signal. 
Useful to reduce the correlation between neighbouring signal values.\n\n Args:\n x (np.ndarray): Audio signal.\n\n Raises:\n RuntimeError: Preemphasis coeff is set to 0.\n\n Returns:\n np.ndarray: Decorrelated audio signal.\n \"\"\"\n return preemphasis(x=x, coef=self.preemphasis)\n\n def apply_inv_preemphasis(self, x: np.ndarray) -> np.ndarray:\n \"\"\"Reverse pre-emphasis.\"\"\"\n return deemphasis(x=x, coef=self.preemphasis)\n\n ### SPECTROGRAMs ###\n def spectrogram(self, y: np.ndarray) -> np.ndarray:\n \"\"\"Compute a spectrogram from a waveform.\n\n Args:\n y (np.ndarray): Waveform.\n\n Returns:\n np.ndarray: Spectrogram.\n \"\"\"\n if self.preemphasis != 0:\n y = self.apply_preemphasis(y)\n D = stft(\n y=y,\n fft_size=self.fft_size,\n hop_length=self.hop_length,\n win_length=self.win_length,\n pad_mode=self.stft_pad_mode,\n )\n if self.do_amp_to_db_linear:\n S = amp_to_db(x=np.abs(D), gain=self.spec_gain, base=self.base)\n else:\n S = np.abs(D)\n return self.normalize(S).astype(np.float32)\n\n def melspectrogram(self, y: np.ndarray) -> np.ndarray:\n \"\"\"Compute a melspectrogram from a waveform.\"\"\"\n if self.preemphasis != 0:\n y = self.apply_preemphasis(y)\n D = stft(\n y=y,\n fft_size=self.fft_size,\n hop_length=self.hop_length,\n win_length=self.win_length,\n pad_mode=self.stft_pad_mode,\n )\n S = spec_to_mel(spec=np.abs(D), mel_basis=self.mel_basis)\n if self.do_amp_to_db_mel:\n S = amp_to_db(x=S, gain=self.spec_gain, base=self.base)\n\n return self.normalize(S).astype(np.float32)\n\n def inv_spectrogram(self, spectrogram: np.ndarray) -> np.ndarray:\n \"\"\"Convert a spectrogram to a waveform using Griffi-Lim vocoder.\"\"\"\n S = self.denormalize(spectrogram)\n S = db_to_amp(x=S, gain=self.spec_gain, base=self.base)\n # Reconstruct phase\n W = self._griffin_lim(S**self.power)\n return self.apply_inv_preemphasis(W) if self.preemphasis != 0 else W\n\n def inv_melspectrogram(self, mel_spectrogram: np.ndarray) -> np.ndarray:\n \"\"\"Convert a melspectrogram to a waveform using Griffi-Lim vocoder.\"\"\"\n D = self.denormalize(mel_spectrogram)\n S = db_to_amp(x=D, gain=self.spec_gain, base=self.base)\n S = mel_to_spec(mel=S, mel_basis=self.mel_basis) # Convert back to linear\n W = self._griffin_lim(S**self.power)\n return self.apply_inv_preemphasis(W) if self.preemphasis != 0 else W\n\n def out_linear_to_mel(self, linear_spec: np.ndarray) -> np.ndarray:\n \"\"\"Convert a full scale linear spectrogram output of a network to a melspectrogram.\n\n Args:\n linear_spec (np.ndarray): Normalized full scale linear spectrogram.\n\n Returns:\n np.ndarray: Normalized melspectrogram.\n \"\"\"\n S = self.denormalize(linear_spec)\n S = db_to_amp(x=S, gain=self.spec_gain, base=self.base)\n S = spec_to_mel(spec=np.abs(S), mel_basis=self.mel_basis)\n S = amp_to_db(x=S, gain=self.spec_gain, base=self.base)\n mel = self.normalize(S)\n return mel\n\n def _griffin_lim(self, S):\n return griffin_lim(\n spec=S,\n num_iter=self.griffin_lim_iters,\n hop_length=self.hop_length,\n win_length=self.win_length,\n fft_size=self.fft_size,\n pad_mode=self.stft_pad_mode,\n )\n\n def compute_f0(self, x: np.ndarray) -> np.ndarray:\n \"\"\"Compute pitch (f0) of a waveform using the same parameters used for computing melspectrogram.\n\n Args:\n x (np.ndarray): Waveform.\n\n Returns:\n np.ndarray: Pitch.\n\n Examples:\n >>> WAV_FILE = filename = librosa.example('vibeace')\n >>> from TTS.config import BaseAudioConfig\n >>> from TTS.utils.audio import AudioProcessor\n >>> conf = BaseAudioConfig(pitch_fmax=640, 
pitch_fmin=1)\n >>> ap = AudioProcessor(**conf)\n >>> wav = ap.load_wav(WAV_FILE, sr=ap.sample_rate)[:5 * ap.sample_rate]\n >>> pitch = ap.compute_f0(wav)\n \"\"\"\n # align F0 length to the spectrogram length\n if len(x) % self.hop_length == 0:\n x = np.pad(x, (0, self.hop_length // 2), mode=self.stft_pad_mode)\n\n f0 = compute_f0(\n x=x,\n pitch_fmax=self.pitch_fmax,\n pitch_fmin=self.pitch_fmin,\n hop_length=self.hop_length,\n win_length=self.win_length,\n sample_rate=self.sample_rate,\n stft_pad_mode=self.stft_pad_mode,\n center=True,\n )\n\n return f0\n\n ### Audio Processing ###\n def find_endpoint(self, wav: np.ndarray, min_silence_sec=0.8) -> int:\n \"\"\"Find the last point without silence at the end of a audio signal.\n\n Args:\n wav (np.ndarray): Audio signal.\n threshold_db (int, optional): Silence threshold in decibels. Defaults to -40.\n min_silence_sec (float, optional): Ignore silences that are shorter then this in secs. Defaults to 0.8.\n\n Returns:\n int: Last point without silence.\n \"\"\"\n return find_endpoint(\n wav=wav,\n trim_db=self.trim_db,\n sample_rate=self.sample_rate,\n min_silence_sec=min_silence_sec,\n gain=self.spec_gain,\n base=self.base,\n )\n\n def trim_silence(self, wav):\n \"\"\"Trim silent parts with a threshold and 0.01 sec margin\"\"\"\n return trim_silence(\n wav=wav,\n sample_rate=self.sample_rate,\n trim_db=self.trim_db,\n win_length=self.win_length,\n hop_length=self.hop_length,\n )\n\n @staticmethod\n def sound_norm(x: np.ndarray) -> np.ndarray:\n \"\"\"Normalize the volume of an audio signal.\n\n Args:\n x (np.ndarray): Raw waveform.\n\n Returns:\n np.ndarray: Volume normalized waveform.\n \"\"\"\n return volume_norm(x=x)\n\n def rms_volume_norm(self, x: np.ndarray, db_level: float = None) -> np.ndarray:\n \"\"\"Normalize the volume based on RMS of the signal.\n\n Args:\n x (np.ndarray): Raw waveform.\n\n Returns:\n np.ndarray: RMS normalized waveform.\n \"\"\"\n if db_level is None:\n db_level = self.db_level\n return rms_volume_norm(x=x, db_level=db_level)\n\n ### save and load ###\n def load_wav(self, filename: str, sr: int = None) -> np.ndarray:\n \"\"\"Read a wav file using Librosa and optionally resample, silence trim, volume normalize.\n\n Resampling slows down loading the file significantly. Therefore it is recommended to resample the file before.\n\n Args:\n filename (str): Path to the wav file.\n sr (int, optional): Sampling rate for resampling. Defaults to None.\n\n Returns:\n np.ndarray: Loaded waveform.\n \"\"\"\n if sr is not None:\n x = load_wav(filename=filename, sample_rate=sr, resample=True)\n else:\n x = load_wav(filename=filename, sample_rate=self.sample_rate, resample=self.resample)\n if self.do_trim_silence:\n try:\n x = self.trim_silence(x)\n except ValueError:\n print(f\" [!] File cannot be trimmed for silence - {filename}\")\n if self.do_sound_norm:\n x = self.sound_norm(x)\n if self.do_rms_norm:\n x = self.rms_volume_norm(x, self.db_level)\n return x\n\n def save_wav(self, wav: np.ndarray, path: str, sr: int = None, pipe_out=None) -> None:\n \"\"\"Save a waveform to a file using Scipy.\n\n Args:\n wav (np.ndarray): Waveform to save.\n path (str): Path to a output file.\n sr (int, optional): Sampling rate used for saving to the file. 
Defaults to None.\n pipe_out (BytesIO, optional): Flag to stdout the generated TTS wav file for shell pipe.\n \"\"\"\n if self.do_rms_norm:\n wav_norm = self.rms_volume_norm(wav, self.db_level) * 32767\n else:\n wav_norm = wav * (32767 / max(0.01, np.max(np.abs(wav))))\n\n wav_norm = wav_norm.astype(np.int16)\n if pipe_out:\n wav_buffer = BytesIO()\n scipy.io.wavfile.write(wav_buffer, sr if sr else self.sample_rate, wav_norm)\n wav_buffer.seek(0)\n pipe_out.buffer.write(wav_buffer.read())\n scipy.io.wavfile.write(path, sr if sr else self.sample_rate, wav_norm)\n\n def get_duration(self, filename: str) -> float:\n \"\"\"Get the duration of a wav file using Librosa.\n\n Args:\n filename (str): Path to the wav file.\n \"\"\"\n return librosa.get_duration(filename=filename)" }, { "identifier": "mulaw_decode", "path": "TTS/utils/audio/numpy_transforms.py", "snippet": "def mulaw_decode(*, wav, mulaw_qc: int, **kwargs) -> np.ndarray:\n \"\"\"Recovers waveform from quantized values.\"\"\"\n mu = 2**mulaw_qc - 1\n x = np.sign(wav) / mu * ((1 + mu) ** np.abs(wav) - 1)\n return x" }, { "identifier": "load_fsspec", "path": "TTS/utils/io.py", "snippet": "def load_fsspec(\n path: str,\n map_location: Union[str, Callable, torch.device, Dict[Union[str, torch.device], Union[str, torch.device]]] = None,\n cache: bool = True,\n **kwargs,\n) -> Any:\n \"\"\"Like torch.load but can load from other locations (e.g. s3:// , gs://).\n\n Args:\n path: Any path or url supported by fsspec.\n map_location: torch.device or str.\n cache: If True, cache a remote file locally for subsequent calls. It is cached under `get_user_data_dir()/tts_cache`. Defaults to True.\n **kwargs: Keyword arguments forwarded to torch.load.\n\n Returns:\n Object stored in path.\n \"\"\"\n is_local = os.path.isdir(path) or os.path.isfile(path)\n if cache and not is_local:\n with fsspec.open(\n f\"filecache::{path}\",\n filecache={\"cache_storage\": str(get_user_data_dir(\"tts_cache\"))},\n mode=\"rb\",\n ) as f:\n return torch.load(f, map_location=map_location, **kwargs)\n else:\n with fsspec.open(path, \"rb\") as f:\n return torch.load(f, map_location=map_location, **kwargs)" }, { "identifier": "WaveRNNDataset", "path": "TTS/vocoder/datasets/wavernn_dataset.py", "snippet": "class WaveRNNDataset(Dataset):\n \"\"\"\n WaveRNN Dataset searchs for all the wav files under root path\n and converts them to acoustic features on the fly.\n \"\"\"\n\n def __init__(\n self, ap, items, seq_len, hop_len, pad, mode, mulaw, is_training=True, verbose=False, return_segments=True\n ):\n super().__init__()\n self.ap = ap\n self.compute_feat = not isinstance(items[0], (tuple, list))\n self.item_list = items\n self.seq_len = seq_len\n self.hop_len = hop_len\n self.mel_len = seq_len // hop_len\n self.pad = pad\n self.mode = mode\n self.mulaw = mulaw\n self.is_training = is_training\n self.verbose = verbose\n self.return_segments = return_segments\n\n assert self.seq_len % self.hop_len == 0\n\n def __len__(self):\n return len(self.item_list)\n\n def __getitem__(self, index):\n item = self.load_item(index)\n return item\n\n def load_test_samples(self, num_samples):\n samples = []\n return_segments = self.return_segments\n self.return_segments = False\n for idx in range(num_samples):\n mel, audio, _ = self.load_item(idx)\n samples.append([mel, audio])\n self.return_segments = return_segments\n return samples\n\n def load_item(self, index):\n \"\"\"\n load (audio, feat) couple if feature_path is set\n else compute it on the fly\n \"\"\"\n if self.compute_feat:\n 
wavpath = self.item_list[index]\n audio = self.ap.load_wav(wavpath)\n if self.return_segments:\n min_audio_len = 2 * self.seq_len + (2 * self.pad * self.hop_len)\n else:\n min_audio_len = audio.shape[0] + (2 * self.pad * self.hop_len)\n if audio.shape[0] < min_audio_len:\n print(\" [!] Instance is too short! : {}\".format(wavpath))\n audio = np.pad(audio, [0, min_audio_len - audio.shape[0] + self.hop_len])\n mel = self.ap.melspectrogram(audio)\n\n if self.mode in [\"gauss\", \"mold\"]:\n x_input = audio\n elif isinstance(self.mode, int):\n x_input = (\n mulaw_encode(wav=audio, mulaw_qc=self.mode)\n if self.mulaw\n else quantize(x=audio, quantize_bits=self.mode)\n )\n else:\n raise RuntimeError(\"Unknown dataset mode - \", self.mode)\n\n else:\n wavpath, feat_path = self.item_list[index]\n mel = np.load(feat_path.replace(\"/quant/\", \"/mel/\"))\n\n if mel.shape[-1] < self.mel_len + 2 * self.pad:\n print(\" [!] Instance is too short! : {}\".format(wavpath))\n self.item_list[index] = self.item_list[index + 1]\n feat_path = self.item_list[index]\n mel = np.load(feat_path.replace(\"/quant/\", \"/mel/\"))\n if self.mode in [\"gauss\", \"mold\"]:\n x_input = self.ap.load_wav(wavpath)\n elif isinstance(self.mode, int):\n x_input = np.load(feat_path.replace(\"/mel/\", \"/quant/\"))\n else:\n raise RuntimeError(\"Unknown dataset mode - \", self.mode)\n\n return mel, x_input, wavpath\n\n def collate(self, batch):\n mel_win = self.seq_len // self.hop_len + 2 * self.pad\n max_offsets = [x[0].shape[-1] - (mel_win + 2 * self.pad) for x in batch]\n\n mel_offsets = [np.random.randint(0, offset) for offset in max_offsets]\n sig_offsets = [(offset + self.pad) * self.hop_len for offset in mel_offsets]\n\n mels = [x[0][:, mel_offsets[i] : mel_offsets[i] + mel_win] for i, x in enumerate(batch)]\n\n coarse = [x[1][sig_offsets[i] : sig_offsets[i] + self.seq_len + 1] for i, x in enumerate(batch)]\n\n mels = np.stack(mels).astype(np.float32)\n if self.mode in [\"gauss\", \"mold\"]:\n coarse = np.stack(coarse).astype(np.float32)\n coarse = torch.FloatTensor(coarse)\n x_input = coarse[:, : self.seq_len]\n elif isinstance(self.mode, int):\n coarse = np.stack(coarse).astype(np.int64)\n coarse = torch.LongTensor(coarse)\n x_input = 2 * coarse[:, : self.seq_len].float() / (2**self.mode - 1.0) - 1.0\n y_coarse = coarse[:, 1:]\n mels = torch.FloatTensor(mels)\n return x_input, mels, y_coarse" }, { "identifier": "WaveRNNLoss", "path": "TTS/vocoder/layers/losses.py", "snippet": "class WaveRNNLoss(nn.Module):\n def __init__(self, wave_rnn_mode: Union[str, int]):\n super().__init__()\n if wave_rnn_mode == \"mold\":\n self.loss_func = discretized_mix_logistic_loss\n elif wave_rnn_mode == \"gauss\":\n self.loss_func = gaussian_loss\n elif isinstance(wave_rnn_mode, int):\n self.loss_func = torch.nn.CrossEntropyLoss()\n else:\n raise ValueError(\" [!] Unknown mode for Wavernn.\")\n\n def forward(self, y_hat, y) -> Dict:\n loss = self.loss_func(y_hat, y)\n return {\"loss\": loss}" }, { "identifier": "BaseVocoder", "path": "TTS/vocoder/models/base_vocoder.py", "snippet": "class BaseVocoder(BaseTrainerModel):\n \"\"\"Base `vocoder` class. 
Every new `vocoder` model must inherit this.\n\n It defines `vocoder` specific functions on top of `Model`.\n\n Notes on input/output tensor shapes:\n Any input or output tensor of the model must be shaped as\n\n - 3D tensors `batch x time x channels`\n - 2D tensors `batch x channels`\n - 1D tensors `batch x 1`\n \"\"\"\n\n MODEL_TYPE = \"vocoder\"\n\n def __init__(self, config):\n super().__init__()\n self._set_model_args(config)\n\n def _set_model_args(self, config: Coqpit):\n \"\"\"Setup model args based on the config type.\n\n If the config is for training with a name like \"*Config\", then the model args are embeded in the\n config.model_args\n\n If the config is for the model with a name like \"*Args\", then we assign the directly.\n \"\"\"\n # don't use isintance not to import recursively\n if \"Config\" in config.__class__.__name__:\n if \"characters\" in config:\n _, self.config, num_chars = self.get_characters(config)\n self.config.num_chars = num_chars\n if hasattr(self.config, \"model_args\"):\n config.model_args.num_chars = num_chars\n if \"model_args\" in config:\n self.args = self.config.model_args\n # This is for backward compatibility\n if \"model_params\" in config:\n self.args = self.config.model_params\n else:\n self.config = config\n if \"model_args\" in config:\n self.args = self.config.model_args\n # This is for backward compatibility\n if \"model_params\" in config:\n self.args = self.config.model_params\n else:\n raise ValueError(\"config must be either a *Config or *Args\")" }, { "identifier": "sample_from_discretized_mix_logistic", "path": "TTS/vocoder/utils/distribution.py", "snippet": "def sample_from_discretized_mix_logistic(y, log_scale_min=None):\n \"\"\"\n Sample from discretized mixture of logistic distributions\n Args:\n y (Tensor): :math:`[B, C, T]`\n log_scale_min (float): Log scale minimum value\n Returns:\n Tensor: sample in range of [-1, 1].\n \"\"\"\n if log_scale_min is None:\n log_scale_min = float(np.log(1e-14))\n assert y.size(1) % 3 == 0\n nr_mix = y.size(1) // 3\n\n # B x T x C\n y = y.transpose(1, 2)\n logit_probs = y[:, :, :nr_mix]\n\n # sample mixture indicator from softmax\n temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)\n temp = logit_probs.data - torch.log(-torch.log(temp))\n _, argmax = temp.max(dim=-1)\n\n # (B, T) -> (B, T, nr_mix)\n one_hot = to_one_hot(argmax, nr_mix)\n # select logistic parameters\n means = torch.sum(y[:, :, nr_mix : 2 * nr_mix] * one_hot, dim=-1)\n log_scales = torch.clamp(torch.sum(y[:, :, 2 * nr_mix : 3 * nr_mix] * one_hot, dim=-1), min=log_scale_min)\n # sample from logistic & clip to interval\n # we don't actually round to the nearest 8bit value when sampling\n u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)\n x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u))\n\n x = torch.clamp(torch.clamp(x, min=-1.0), max=1.0)\n\n return x" }, { "identifier": "sample_from_gaussian", "path": "TTS/vocoder/utils/distribution.py", "snippet": "def sample_from_gaussian(y_hat, log_std_min=-7.0, scale_factor=1.0):\n assert y_hat.size(2) == 2\n mean = y_hat[:, :, :1]\n log_std = torch.clamp(y_hat[:, :, 1:], min=log_std_min)\n dist = Normal(\n mean,\n torch.exp(log_std),\n )\n sample = dist.sample()\n sample = torch.clamp(torch.clamp(sample, min=-scale_factor), max=scale_factor)\n del dist\n return sample" } ]
import sys import time import numpy as np import torch import torch.nn.functional as F from dataclasses import dataclass, field from typing import Dict, List, Tuple from coqpit import Coqpit from torch import nn from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from TTS.tts.utils.visual import plot_spectrogram from TTS.utils.audio import AudioProcessor from TTS.utils.audio.numpy_transforms import mulaw_decode from TTS.utils.io import load_fsspec from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset from TTS.vocoder.layers.losses import WaveRNNLoss from TTS.vocoder.models.base_vocoder import BaseVocoder from TTS.vocoder.utils.distribution import sample_from_discretized_mix_logistic, sample_from_gaussian
12,234
self.args.pad, self.args.num_res_blocks, self.args.feat_dims, self.args.compute_dims, self.args.res_out_dims, self.args.use_aux_net, ) if self.args.use_aux_net: self.I = nn.Linear(self.args.feat_dims + self.aux_dims + 1, self.args.rnn_dims) self.rnn1 = nn.GRU(self.args.rnn_dims, self.args.rnn_dims, batch_first=True) self.rnn2 = nn.GRU(self.args.rnn_dims + self.aux_dims, self.args.rnn_dims, batch_first=True) self.fc1 = nn.Linear(self.args.rnn_dims + self.aux_dims, self.args.fc_dims) self.fc2 = nn.Linear(self.args.fc_dims + self.aux_dims, self.args.fc_dims) self.fc3 = nn.Linear(self.args.fc_dims, self.n_classes) else: self.I = nn.Linear(self.args.feat_dims + 1, self.args.rnn_dims) self.rnn1 = nn.GRU(self.args.rnn_dims, self.args.rnn_dims, batch_first=True) self.rnn2 = nn.GRU(self.args.rnn_dims, self.args.rnn_dims, batch_first=True) self.fc1 = nn.Linear(self.args.rnn_dims, self.args.fc_dims) self.fc2 = nn.Linear(self.args.fc_dims, self.args.fc_dims) self.fc3 = nn.Linear(self.args.fc_dims, self.n_classes) def forward(self, x, mels): bsize = x.size(0) h1 = torch.zeros(1, bsize, self.args.rnn_dims).to(x.device) h2 = torch.zeros(1, bsize, self.args.rnn_dims).to(x.device) mels, aux = self.upsample(mels) if self.args.use_aux_net: aux_idx = [self.aux_dims * i for i in range(5)] a1 = aux[:, :, aux_idx[0] : aux_idx[1]] a2 = aux[:, :, aux_idx[1] : aux_idx[2]] a3 = aux[:, :, aux_idx[2] : aux_idx[3]] a4 = aux[:, :, aux_idx[3] : aux_idx[4]] x = ( torch.cat([x.unsqueeze(-1), mels, a1], dim=2) if self.args.use_aux_net else torch.cat([x.unsqueeze(-1), mels], dim=2) ) x = self.I(x) res = x self.rnn1.flatten_parameters() x, _ = self.rnn1(x, h1) x = x + res res = x x = torch.cat([x, a2], dim=2) if self.args.use_aux_net else x self.rnn2.flatten_parameters() x, _ = self.rnn2(x, h2) x = x + res x = torch.cat([x, a3], dim=2) if self.args.use_aux_net else x x = F.relu(self.fc1(x)) x = torch.cat([x, a4], dim=2) if self.args.use_aux_net else x x = F.relu(self.fc2(x)) return self.fc3(x) def inference(self, mels, batched=None, target=None, overlap=None): self.eval() output = [] start = time.time() rnn1 = self.get_gru_cell(self.rnn1) rnn2 = self.get_gru_cell(self.rnn2) with torch.no_grad(): if isinstance(mels, np.ndarray): mels = torch.FloatTensor(mels).to(str(next(self.parameters()).device)) if mels.ndim == 2: mels = mels.unsqueeze(0) wave_len = (mels.size(-1) - 1) * self.config.audio.hop_length mels = self.pad_tensor(mels.transpose(1, 2), pad=self.args.pad, side="both") mels, aux = self.upsample(mels.transpose(1, 2)) if batched: mels = self.fold_with_overlap(mels, target, overlap) if aux is not None: aux = self.fold_with_overlap(aux, target, overlap) b_size, seq_len, _ = mels.size() h1 = torch.zeros(b_size, self.args.rnn_dims).type_as(mels) h2 = torch.zeros(b_size, self.args.rnn_dims).type_as(mels) x = torch.zeros(b_size, 1).type_as(mels) if self.args.use_aux_net: d = self.aux_dims aux_split = [aux[:, :, d * i : d * (i + 1)] for i in range(4)] for i in range(seq_len): m_t = mels[:, i, :] if self.args.use_aux_net: a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split) x = torch.cat([x, m_t, a1_t], dim=1) if self.args.use_aux_net else torch.cat([x, m_t], dim=1) x = self.I(x) h1 = rnn1(x, h1) x = x + h1 inp = torch.cat([x, a2_t], dim=1) if self.args.use_aux_net else x h2 = rnn2(inp, h2) x = x + h2 x = torch.cat([x, a3_t], dim=1) if self.args.use_aux_net else x x = F.relu(self.fc1(x)) x = torch.cat([x, a4_t], dim=1) if self.args.use_aux_net else x x = F.relu(self.fc2(x)) logits = self.fc3(x) if self.args.mode == "mold": 
sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2)) output.append(sample.view(-1)) x = sample.transpose(0, 1).type_as(mels) elif self.args.mode == "gauss":
def stream(string, variables): sys.stdout.write(f"\r{string}" % variables) # pylint: disable=abstract-method # relates https://github.com/pytorch/pytorch/issues/42305 class ResBlock(nn.Module): def __init__(self, dims): super().__init__() self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) self.batch_norm1 = nn.BatchNorm1d(dims) self.batch_norm2 = nn.BatchNorm1d(dims) def forward(self, x): residual = x x = self.conv1(x) x = self.batch_norm1(x) x = F.relu(x) x = self.conv2(x) x = self.batch_norm2(x) return x + residual class MelResNet(nn.Module): def __init__(self, num_res_blocks, in_dims, compute_dims, res_out_dims, pad): super().__init__() k_size = pad * 2 + 1 self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False) self.batch_norm = nn.BatchNorm1d(compute_dims) self.layers = nn.ModuleList() for _ in range(num_res_blocks): self.layers.append(ResBlock(compute_dims)) self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1) def forward(self, x): x = self.conv_in(x) x = self.batch_norm(x) x = F.relu(x) for f in self.layers: x = f(x) x = self.conv_out(x) return x class Stretch2d(nn.Module): def __init__(self, x_scale, y_scale): super().__init__() self.x_scale = x_scale self.y_scale = y_scale def forward(self, x): b, c, h, w = x.size() x = x.unsqueeze(-1).unsqueeze(3) x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale) return x.view(b, c, h * self.y_scale, w * self.x_scale) class UpsampleNetwork(nn.Module): def __init__( self, feat_dims, upsample_scales, compute_dims, num_res_blocks, res_out_dims, pad, use_aux_net, ): super().__init__() self.total_scale = np.cumproduct(upsample_scales)[-1] self.indent = pad * self.total_scale self.use_aux_net = use_aux_net if use_aux_net: self.resnet = MelResNet(num_res_blocks, feat_dims, compute_dims, res_out_dims, pad) self.resnet_stretch = Stretch2d(self.total_scale, 1) self.up_layers = nn.ModuleList() for scale in upsample_scales: k_size = (1, scale * 2 + 1) padding = (0, scale) stretch = Stretch2d(scale, 1) conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False) conv.weight.data.fill_(1.0 / k_size[1]) self.up_layers.append(stretch) self.up_layers.append(conv) def forward(self, m): if self.use_aux_net: aux = self.resnet(m).unsqueeze(1) aux = self.resnet_stretch(aux) aux = aux.squeeze(1) aux = aux.transpose(1, 2) else: aux = None m = m.unsqueeze(1) for f in self.up_layers: m = f(m) m = m.squeeze(1)[:, :, self.indent : -self.indent] return m.transpose(1, 2), aux class Upsample(nn.Module): def __init__(self, scale, pad, num_res_blocks, feat_dims, compute_dims, res_out_dims, use_aux_net): super().__init__() self.scale = scale self.pad = pad self.indent = pad * scale self.use_aux_net = use_aux_net self.resnet = MelResNet(num_res_blocks, feat_dims, compute_dims, res_out_dims, pad) def forward(self, m): if self.use_aux_net: aux = self.resnet(m) aux = torch.nn.functional.interpolate(aux, scale_factor=self.scale, mode="linear", align_corners=True) aux = aux.transpose(1, 2) else: aux = None m = torch.nn.functional.interpolate(m, scale_factor=self.scale, mode="linear", align_corners=True) m = m[:, :, self.indent : -self.indent] m = m * 0.045 # empirically found return m.transpose(1, 2), aux @dataclass class WavernnArgs(Coqpit): """🐸 WaveRNN model arguments. rnn_dims (int): Number of hidden channels in RNN layers. Defaults to 512. fc_dims (int): Number of hidden channels in fully-conntected layers. Defaults to 512. 
compute_dims (int): Number of hidden channels in the feature ResNet. Defaults to 128. res_out_dim (int): Number of hidden channels in the feature ResNet output. Defaults to 128. num_res_blocks (int): Number of residual blocks in the ResNet. Defaults to 10. use_aux_net (bool): enable/disable the feature ResNet. Defaults to True. use_upsample_net (bool): enable/ disable the upsampling networl. If False, basic upsampling is used. Defaults to True. upsample_factors (list): Upsampling factors. The multiply of the values must match the `hop_length`. Defaults to ```[4, 8, 8]```. mode (str): Output mode of the WaveRNN vocoder. `mold` for Mixture of Logistic Distribution, `gauss` for a single Gaussian Distribution and `bits` for quantized bits as the model's output. mulaw (bool): enable / disable the use of Mulaw quantization for training. Only applicable if `mode == 'bits'`. Defaults to `True`. pad (int): Padding applied to the input feature frames against the convolution layers of the feature network. Defaults to 2. """ rnn_dims: int = 512 fc_dims: int = 512 compute_dims: int = 128 res_out_dims: int = 128 num_res_blocks: int = 10 use_aux_net: bool = True use_upsample_net: bool = True upsample_factors: List[int] = field(default_factory=lambda: [4, 8, 8]) mode: str = "mold" # mold [string], gauss [string], bits [int] mulaw: bool = True # apply mulaw if mode is bits pad: int = 2 feat_dims: int = 80 class Wavernn(BaseVocoder): def __init__(self, config: Coqpit): """🐸 WaveRNN model. Original paper - https://arxiv.org/abs/1802.08435 Official implementation - https://github.com/fatchord/WaveRNN Args: config (Coqpit): [description] Raises: RuntimeError: [description] Examples: >>> from TTS.vocoder.configs import WavernnConfig >>> config = WavernnConfig() >>> model = Wavernn(config) Paper Abstract: Sequential models achieve state-of-the-art results in audio, visual and textual domains with respect to both estimating the data distribution and generating high-quality samples. Efficient sampling for this class of models has however remained an elusive problem. With a focus on text-to-speech synthesis, we describe a set of general techniques for reducing sampling time while maintaining high output quality. We first describe a single-layer recurrent neural network, the WaveRNN, with a dual softmax layer that matches the quality of the state-of-the-art WaveNet model. The compact form of the network makes it possible to generate 24kHz 16-bit audio 4x faster than real time on a GPU. Second, we apply a weight pruning technique to reduce the number of weights in the WaveRNN. We find that, for a constant number of parameters, large sparse networks perform better than small dense networks and this relationship holds for sparsity levels beyond 96%. The small number of weights in a Sparse WaveRNN makes it possible to sample high-fidelity audio on a mobile CPU in real time. Finally, we propose a new generation scheme based on subscaling that folds a long sequence into a batch of shorter sequences and allows one to generate multiple samples at once. The Subscale WaveRNN produces 16 samples per step without loss of quality and offers an orthogonal method for increasing sampling efficiency. 
""" super().__init__(config) if isinstance(self.args.mode, int): self.n_classes = 2**self.args.mode elif self.args.mode == "mold": self.n_classes = 3 * 10 elif self.args.mode == "gauss": self.n_classes = 2 else: raise RuntimeError("Unknown model mode value - ", self.args.mode) self.ap = AudioProcessor(**config.audio.to_dict()) self.aux_dims = self.args.res_out_dims // 4 if self.args.use_upsample_net: assert ( np.cumproduct(self.args.upsample_factors)[-1] == config.audio.hop_length ), " [!] upsample scales needs to be equal to hop_length" self.upsample = UpsampleNetwork( self.args.feat_dims, self.args.upsample_factors, self.args.compute_dims, self.args.num_res_blocks, self.args.res_out_dims, self.args.pad, self.args.use_aux_net, ) else: self.upsample = Upsample( config.audio.hop_length, self.args.pad, self.args.num_res_blocks, self.args.feat_dims, self.args.compute_dims, self.args.res_out_dims, self.args.use_aux_net, ) if self.args.use_aux_net: self.I = nn.Linear(self.args.feat_dims + self.aux_dims + 1, self.args.rnn_dims) self.rnn1 = nn.GRU(self.args.rnn_dims, self.args.rnn_dims, batch_first=True) self.rnn2 = nn.GRU(self.args.rnn_dims + self.aux_dims, self.args.rnn_dims, batch_first=True) self.fc1 = nn.Linear(self.args.rnn_dims + self.aux_dims, self.args.fc_dims) self.fc2 = nn.Linear(self.args.fc_dims + self.aux_dims, self.args.fc_dims) self.fc3 = nn.Linear(self.args.fc_dims, self.n_classes) else: self.I = nn.Linear(self.args.feat_dims + 1, self.args.rnn_dims) self.rnn1 = nn.GRU(self.args.rnn_dims, self.args.rnn_dims, batch_first=True) self.rnn2 = nn.GRU(self.args.rnn_dims, self.args.rnn_dims, batch_first=True) self.fc1 = nn.Linear(self.args.rnn_dims, self.args.fc_dims) self.fc2 = nn.Linear(self.args.fc_dims, self.args.fc_dims) self.fc3 = nn.Linear(self.args.fc_dims, self.n_classes) def forward(self, x, mels): bsize = x.size(0) h1 = torch.zeros(1, bsize, self.args.rnn_dims).to(x.device) h2 = torch.zeros(1, bsize, self.args.rnn_dims).to(x.device) mels, aux = self.upsample(mels) if self.args.use_aux_net: aux_idx = [self.aux_dims * i for i in range(5)] a1 = aux[:, :, aux_idx[0] : aux_idx[1]] a2 = aux[:, :, aux_idx[1] : aux_idx[2]] a3 = aux[:, :, aux_idx[2] : aux_idx[3]] a4 = aux[:, :, aux_idx[3] : aux_idx[4]] x = ( torch.cat([x.unsqueeze(-1), mels, a1], dim=2) if self.args.use_aux_net else torch.cat([x.unsqueeze(-1), mels], dim=2) ) x = self.I(x) res = x self.rnn1.flatten_parameters() x, _ = self.rnn1(x, h1) x = x + res res = x x = torch.cat([x, a2], dim=2) if self.args.use_aux_net else x self.rnn2.flatten_parameters() x, _ = self.rnn2(x, h2) x = x + res x = torch.cat([x, a3], dim=2) if self.args.use_aux_net else x x = F.relu(self.fc1(x)) x = torch.cat([x, a4], dim=2) if self.args.use_aux_net else x x = F.relu(self.fc2(x)) return self.fc3(x) def inference(self, mels, batched=None, target=None, overlap=None): self.eval() output = [] start = time.time() rnn1 = self.get_gru_cell(self.rnn1) rnn2 = self.get_gru_cell(self.rnn2) with torch.no_grad(): if isinstance(mels, np.ndarray): mels = torch.FloatTensor(mels).to(str(next(self.parameters()).device)) if mels.ndim == 2: mels = mels.unsqueeze(0) wave_len = (mels.size(-1) - 1) * self.config.audio.hop_length mels = self.pad_tensor(mels.transpose(1, 2), pad=self.args.pad, side="both") mels, aux = self.upsample(mels.transpose(1, 2)) if batched: mels = self.fold_with_overlap(mels, target, overlap) if aux is not None: aux = self.fold_with_overlap(aux, target, overlap) b_size, seq_len, _ = mels.size() h1 = torch.zeros(b_size, 
self.args.rnn_dims).type_as(mels) h2 = torch.zeros(b_size, self.args.rnn_dims).type_as(mels) x = torch.zeros(b_size, 1).type_as(mels) if self.args.use_aux_net: d = self.aux_dims aux_split = [aux[:, :, d * i : d * (i + 1)] for i in range(4)] for i in range(seq_len): m_t = mels[:, i, :] if self.args.use_aux_net: a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split) x = torch.cat([x, m_t, a1_t], dim=1) if self.args.use_aux_net else torch.cat([x, m_t], dim=1) x = self.I(x) h1 = rnn1(x, h1) x = x + h1 inp = torch.cat([x, a2_t], dim=1) if self.args.use_aux_net else x h2 = rnn2(inp, h2) x = x + h2 x = torch.cat([x, a3_t], dim=1) if self.args.use_aux_net else x x = F.relu(self.fc1(x)) x = torch.cat([x, a4_t], dim=1) if self.args.use_aux_net else x x = F.relu(self.fc2(x)) logits = self.fc3(x) if self.args.mode == "mold": sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2)) output.append(sample.view(-1)) x = sample.transpose(0, 1).type_as(mels) elif self.args.mode == "gauss":
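The inference method in this record batches generation by calling fold_with_overlap, whose body lies outside the cropped code shown here. The snippet below is only a minimal sketch of that folding idea, assuming a (1, T, C) conditioning tensor; the name fold_with_overlap_sketch, the chunking arithmetic, and the example shapes are illustrative assumptions, not the library's implementation.

import torch

def fold_with_overlap_sketch(x, target, overlap):
    # Illustrative only: split a (1, T, C) sequence into overlapping chunks so
    # each chunk can be generated in parallel as one batch element.
    _, total_len, channels = x.size()
    step = target + overlap                                   # hop between chunk starts
    num_folds = max(1, -(-(total_len - overlap) // step))     # ceil division
    padded_len = num_folds * step + overlap
    if padded_len > total_len:                                # zero-pad the tail
        pad = torch.zeros(1, padded_len - total_len, channels,
                          dtype=x.dtype, device=x.device)
        x = torch.cat([x, pad], dim=1)
    chunks = [x[:, i * step: i * step + target + 2 * overlap, :]
              for i in range(num_folds)]
    return torch.cat(chunks, dim=0)                           # (num_folds, target + 2*overlap, C)

mels = torch.randn(1, 1000, 80)
folded = fold_with_overlap_sketch(mels, target=256, overlap=32)
print(folded.shape)  # torch.Size([4, 320, 80])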
sample = sample_from_gaussian(logits.unsqueeze(0).transpose(1, 2))
8
2023-11-29 08:15:06+00:00
16k
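For this record the expected next line samples from the Gaussian output head, and the constructor above sets n_classes = 2 when mode == "gauss". The sketch below is a hedged stand-in for sample_from_gaussian, whose body is not part of this record: it simply reads the two output channels as a mean and a log standard deviation. The function name sample_from_gaussian_sketch and the clamp value are assumptions made for illustration.

import torch

def sample_from_gaussian_sketch(y_hat: torch.Tensor) -> torch.Tensor:
    # Illustrative sketch only, not the library's sample_from_gaussian.
    # y_hat carries 2 channels along dim 1 (here (1, 2, B) after the
    # unsqueeze/transpose in the record's next line): a mean and a log std.
    assert y_hat.size(1) == 2
    mean, log_std = y_hat.chunk(2, dim=1)
    log_std = torch.clamp(log_std, min=-7.0)   # keep the scale numerically sane
    return mean + log_std.exp() * torch.randn_like(mean)

logits = torch.randn(4, 2)                      # (B, n_classes) with n_classes == 2
sample = sample_from_gaussian_sketch(logits.unsqueeze(0).transpose(1, 2))
print(sample.shape)                             # torch.Size([1, 1, 4])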
AILab-CVC/UniRepLKNet
Video/dataset/build.py
[ { "identifier": "RawFrameClsDataset", "path": "Video/dataset/datasets.py", "snippet": "class RawFrameClsDataset(Dataset):\n \"\"\"Load your own raw frame classification dataset.\"\"\"\n\n def __init__(self,\n anno_path,\n data_root,\n mode='train',\n clip_len=8,\n crop_size=224,\n short_side_size=256,\n new_height=256,\n new_width=340,\n keep_aspect_ratio=True,\n num_segment=1,\n num_crop=1,\n test_num_segment=10,\n test_num_crop=3,\n filename_tmpl='img_{:05}.jpg',\n start_idx=1,\n args=None):\n self.anno_path = anno_path\n self.data_root = data_root\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.filename_tmpl = filename_tmpl\n self.start_idx = start_idx\n self.args = args\n self.aug = False\n self.rand_erase = False\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n\n self.image_loader = get_image_loader()\n\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(\n cleaned[0].apply(lambda row: os.path.join(self.data_root, row)))\n self.total_frames = list(cleaned.values[:, 1])\n self.label_array = list(cleaned.values[:, -1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(\n self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(\n size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(\n size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_total_frames = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n self.test_seg.append((ck, cp))\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_total_frames.append(self.total_frames[idx])\n self.test_label_array.append(self.label_array[idx])\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(\n sample, total_frame, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during training\".format(\n sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(\n sample, total_frame, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n 
else:\n buffer = self._aug_frame(buffer, args)\n\n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample, total_frame)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during validation\".\n format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_frame(sample, total_frame)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n while len(buffer) == 0:\n warnings.warn(\n \"video {}, temporal {}, spatial {} not found during testing\"\n .format(str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) -\n self.short_side_size) / (\n self.test_num_crop - 1)\n temporal_start = chunk_nb\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::self.test_num_segment,\n spatial_start:spatial_start +\n self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::self.test_num_segment, :,\n spatial_start:spatial_start +\n self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0], chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(self, buffer, args):\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [transforms.ToPILImage()(frame) for frame in buffer]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C\n\n # T H W C\n buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False)\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def load_frame(self, sample, num_frames, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if self.mode == 'test':\n tick = num_frames / float(self.num_segment)\n all_index = 
[]\n for t_seg in range(self.test_num_segment):\n tmp_index = [\n int(t_seg * tick / self.test_num_segment + tick * x)\n for x in range(self.num_segment)\n ]\n all_index.extend(tmp_index)\n all_index = list(np.sort(np.array(all_index) + self.start_idx))\n imgs = []\n for idx in all_index:\n frame_fname = os.path.join(fname,\n self.filename_tmpl.format(idx))\n img = self.image_loader(frame_fname)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n # handle temporal segments\n average_duration = num_frames // self.num_segment\n all_index = []\n if average_duration > 0:\n if self.mode == 'validation':\n all_index = list(\n np.multiply(\n list(range(self.num_segment)), average_duration) +\n np.ones(self.num_segment, dtype=int) *\n (average_duration // 2))\n else:\n all_index = list(\n np.multiply(\n list(range(self.num_segment)), average_duration) +\n np.random.randint(average_duration, size=self.num_segment))\n elif num_frames > self.num_segment:\n if self.mode == 'validation':\n all_index = list(range(self.num_segment))\n else:\n all_index = list(\n np.sort(\n np.random.randint(num_frames, size=self.num_segment)))\n else:\n all_index = [0] * (self.num_segment - num_frames) + list(\n range(num_frames))\n all_index = list(np.array(all_index) + self.start_idx)\n imgs = []\n for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx))\n img = self.image_loader(frame_fname)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "VideoClsDataset", "path": "Video/dataset/datasets.py", "snippet": "class VideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self,\n anno_path,\n data_root='',\n mode='train',\n clip_len=8,\n frame_sample_rate=2,\n crop_size=224,\n short_side_size=256,\n new_height=256,\n new_width=340,\n keep_aspect_ratio=True,\n num_segment=1,\n num_crop=1,\n test_num_segment=10,\n test_num_crop=3,\n sparse_sample=False,\n args=None):\n self.anno_path = anno_path\n self.data_root = data_root\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.sparse_sample = sparse_sample\n self.args = args\n self.aug = False\n self.rand_erase = False\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n\n self.video_loader = get_video_loader()\n\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(\n cleaned[0].apply(lambda row: os.path.join(self.data_root, row)))\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(\n self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(\n size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(\n 
size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n\n sample = self.dataset_samples[index]\n # T H W C\n buffer = self.load_video(sample, sample_rate_scale=scale_t)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during training\".format(\n sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_video(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n\n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.load_video(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during validation\".\n format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_video(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_video(sample)\n\n while len(buffer) == 0:\n warnings.warn(\n \"video {}, temporal {}, spatial {} not found during testing\"\n .format(str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_video(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.sparse_sample:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) -\n self.short_side_size) / (\n self.test_num_crop - 1)\n temporal_start = chunk_nb\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::self.test_num_segment,\n spatial_start:spatial_start +\n self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::self.test_num_segment, :,\n spatial_start:spatial_start +\n self.short_side_size, :]\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) -\n self.short_side_size) / (\n self.test_num_crop - 1)\n temporal_step = max(\n 1.0 * (buffer.shape[0] - self.clip_len) /\n (self.test_num_segment - 1), 0)\n temporal_start = int(chunk_nb * temporal_step)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start:temporal_start +\n self.clip_len,\n 
spatial_start:spatial_start +\n self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start:temporal_start +\n self.clip_len, :,\n spatial_start:spatial_start +\n self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0], chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(self, buffer, args):\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [transforms.ToPILImage()(frame) for frame in buffer]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C\n\n # T H W C\n buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n # crop_size=224,\n crop_size=args.input_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False)\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W\n\n return buffer\n\n def load_video(self, sample, sample_rate_scale=1):\n fname = sample\n\n try:\n vr = self.video_loader(fname)\n except Exception as e:\n print(f\"Failed to load video from {fname} with error {e}!\")\n return []\n\n length = len(vr)\n\n if self.mode == 'test':\n if self.sparse_sample:\n tick = length / float(self.num_segment)\n all_index = []\n for t_seg in range(self.test_num_segment):\n tmp_index = [\n int(t_seg * tick / self.test_num_segment + tick * x)\n for x in range(self.num_segment)\n ]\n all_index.extend(tmp_index)\n all_index = list(np.sort(np.array(all_index)))\n else:\n all_index = [\n x for x in range(0, length, self.frame_sample_rate)\n ]\n while len(all_index) < self.clip_len:\n all_index.append(all_index[-1])\n\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n converted_len = int(self.clip_len * self.frame_sample_rate)\n seg_len = length // self.num_segment\n\n all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(\n 0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate(\n (index,\n np.ones(self.clip_len - seg_len // self.frame_sample_rate)\n * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n else:\n if self.mode == 'validation':\n end_idx = (converted_len + seg_len) // 2\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.clip_len)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i * seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if 
self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "DataAugmentationForVideoMAEv2", "path": "Video/dataset/pretrain_datasets.py", "snippet": "class DataAugmentationForVideoMAEv2(object):\n\n def __init__(self, args):\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n div = True\n roll = False\n normalize = GroupNormalize(self.input_mean, self.input_std)\n self.train_augmentation = GroupMultiScaleCrop(args.input_size,\n [1, .875, .75, .66])\n self.transform = transforms.Compose([\n self.train_augmentation,\n Stack(roll=roll),\n ToTorchFormatTensor(div=div),\n normalize,\n ])\n if args.mask_type == 'tube':\n self.encoder_mask_map_generator = TubeMaskingGenerator(\n args.window_size, args.mask_ratio)\n else:\n raise NotImplementedError(\n 'Unsupported encoder masking strategy type.')\n if args.decoder_mask_ratio > 0.:\n if args.decoder_mask_type == 'run_cell':\n self.decoder_mask_map_generator = RunningCellMaskingGenerator(\n args.window_size, args.decoder_mask_ratio)\n else:\n raise NotImplementedError(\n 'Unsupported decoder masking strategy type.')\n\n def __call__(self, images):\n process_data, _ = self.transform(images)\n encoder_mask_map = self.encoder_mask_map_generator()\n if hasattr(self, 'decoder_mask_map_generator'):\n decoder_mask_map = self.decoder_mask_map_generator()\n else:\n decoder_mask_map = 1 - encoder_mask_map\n return process_data, encoder_mask_map, decoder_mask_map\n\n def __repr__(self):\n repr = \"(DataAugmentationForVideoMAEv2,\\n\"\n repr += \" transform = %s,\\n\" % str(self.transform)\n repr += \" Encoder Masking Generator = %s,\\n\" % str(\n self.encoder_mask_map_generator)\n if hasattr(self, 'decoder_mask_map_generator'):\n repr += \" Decoder Masking Generator = %s,\\n\" % str(\n self.decoder_mask_map_generator)\n else:\n repr += \" Do not use decoder masking,\\n\"\n repr += \")\"\n return repr" }, { "identifier": "HybridVideoMAE", "path": "Video/dataset/pretrain_datasets.py", "snippet": "class HybridVideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own videomae pretraining dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are four items in each line:\n (1) video path; (2) start_idx, (3) total frames and (4) video label.\n for pre-train video data\n total frames < 0, start_idx and video label meaningless\n for pre-train rawframe data\n video label meaningless\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default 'img_{:05}.jpg'.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep 
Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n transform : function, default None.\n A function that takes data and label and transforms them.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n num_sample : int, default 1.\n Number of sampled views for Repeated Augmentation.\n \"\"\"\n\n def __init__(self,\n root,\n setting,\n train=True,\n test_mode=False,\n name_pattern='img_{:05}.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n lazy_init=False,\n num_sample=1):\n\n super(HybridVideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_ext = video_ext\n self.transform = transform\n self.lazy_init = lazy_init\n self.num_sample = num_sample\n\n # NOTE:\n # for hybrid train\n # different frame naming formats are used for different datasets\n # should MODIFY the fname_tmpl to your own situation\n self.ava_fname_tmpl = 'image_{:06}.jpg'\n self.ssv2_fname_tmpl = 'img_{:05}.jpg'\n\n # NOTE:\n # we set sampling_rate = 2 for ssv2\n # thus being consistent with the fine-tuning stage\n # Note that the ssv2 we use is decoded to frames at 12 fps;\n # if decoded at 24 fps, the sample interval should be 4.\n self.ssv2_skip_length = self.new_length * 2\n self.orig_skip_length = self.skip_length\n\n self.video_loader = get_video_loader()\n self.image_loader = get_image_loader()\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise (\n RuntimeError(\"Found 0 video clips in subfolders of: \" +\n root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n try:\n video_name, start_idx, total_frame = self.clips[index]\n self.skip_length = self.orig_skip_length\n\n if total_frame < 0:\n decord_vr = self.video_loader(video_name)\n duration = len(decord_vr)\n\n segment_indices, skip_offsets = self._sample_train_indices(\n duration)\n frame_id_list = self.get_frame_id_list(duration,\n segment_indices,\n skip_offsets)\n video_data = decord_vr.get_batch(frame_id_list).asnumpy()\n images = [\n Image.fromarray(video_data[vid, :, :, :]).convert('RGB')\n for vid, _ in enumerate(frame_id_list)\n ]\n\n else:\n # ssv2 & ava & other rawframe dataset\n if 'SomethingV2' in video_name:\n self.skip_length = self.ssv2_skip_length\n fname_tmpl = self.ssv2_fname_tmpl\n elif 'AVA2.2' in video_name:\n fname_tmpl = self.ava_fname_tmpl\n else:\n fname_tmpl = 
self.name_pattern\n\n segment_indices, skip_offsets = self._sample_train_indices(\n total_frame)\n frame_id_list = self.get_frame_id_list(total_frame,\n segment_indices,\n skip_offsets)\n\n images = []\n for idx in frame_id_list:\n frame_fname = os.path.join(\n video_name, fname_tmpl.format(idx + start_idx))\n img = self.image_loader(frame_fname)\n img = Image.fromarray(img)\n images.append(img)\n\n except Exception as e:\n print(\"Failed to load video from {} with error {}\".format(\n video_name, e))\n index = random.randint(0, len(self.clips) - 1)\n return self.__getitem__(index)\n\n if self.num_sample > 1:\n process_data_list = []\n encoder_mask_list = []\n decoder_mask_list = []\n for _ in range(self.num_sample):\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n process_data = process_data.view(\n (self.new_length, 3) + process_data.size()[-2:]).transpose(\n 0, 1)\n process_data_list.append(process_data)\n encoder_mask_list.append(encoder_mask)\n decoder_mask_list.append(decoder_mask)\n return process_data_list, encoder_mask_list, decoder_mask_list\n else:\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n # T*C,H,W -> T,C,H,W -> C,T,H,W\n process_data = process_data.view(\n (self.new_length, 3) + process_data.size()[-2:]).transpose(\n 0, 1)\n return process_data, encoder_mask, decoder_mask\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, root, setting):\n if not os.path.exists(setting):\n raise (RuntimeError(\n \"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \"\n % (setting)))\n clips = []\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(' ')\n # line format: video_path, video_duration, video_label\n if len(line_info) < 2:\n raise (RuntimeError(\n 'Video input format is not correct, missing one or more element. 
%s'\n % line))\n clip_path = os.path.join(root, line_info[0])\n start_idx = int(line_info[1])\n total_frame = int(line_info[2])\n item = (clip_path, start_idx, total_frame)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length +\n 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(\n list(range(self.num_segments)), average_duration)\n offsets = offsets + np.random.randint(\n average_duration, size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(\n np.random.randint(\n num_frames - self.skip_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments, ))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n def get_frame_id_list(self, duration, indices, skip_offsets):\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n return frame_id_list" }, { "identifier": "VideoMAE", "path": "Video/dataset/pretrain_datasets.py", "snippet": "class VideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own videomae pretraining dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are four items in each line:\n (1) video path; (2) start_idx, (3) total frames and (4) video label.\n for pre-train video data\n total frames < 0, start_idx and video label meaningless\n for pre-train rawframe data\n video label meaningless\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default 'img_{:05}.jpg'.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. 
For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n transform : function, default None.\n A function that takes data and label and transforms them.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n num_sample : int, default 1.\n Number of sampled views for Repeated Augmentation.\n \"\"\"\n\n def __init__(self,\n root,\n setting,\n train=True,\n test_mode=False,\n name_pattern='img_{:05}.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n lazy_init=False,\n num_sample=1):\n\n super(VideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_ext = video_ext\n self.transform = transform\n self.lazy_init = lazy_init\n self.num_sample = num_sample\n\n self.video_loader = get_video_loader()\n self.image_loader = get_image_loader()\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise (\n RuntimeError(\"Found 0 video clips in subfolders of: \" +\n root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n try:\n video_name, start_idx, total_frame = self.clips[index]\n if total_frame < 0: # load video\n decord_vr = self.video_loader(video_name)\n duration = len(decord_vr)\n\n segment_indices, skip_offsets = self._sample_train_indices(\n duration)\n frame_id_list = self.get_frame_id_list(duration,\n segment_indices,\n skip_offsets)\n video_data = decord_vr.get_batch(frame_id_list).asnumpy()\n images = [\n Image.fromarray(video_data[vid, :, :, :]).convert('RGB')\n for vid, _ in enumerate(frame_id_list)\n ]\n else: # load frames\n segment_indices, skip_offsets = self._sample_train_indices(\n total_frame)\n frame_id_list = self.get_frame_id_list(total_frame,\n segment_indices,\n skip_offsets)\n\n images = []\n for idx in frame_id_list:\n frame_fname = os.path.join(\n video_name, self.name_pattern.format(idx + start_idx))\n img = self.image_loader(frame_fname)\n img = Image.fromarray(img)\n images.append(img)\n\n except Exception as e:\n print(\"Failed to load video from {} with error {}\".format(\n video_name, e))\n index = random.randint(0, len(self.clips) - 1)\n return self.__getitem__(index)\n\n if self.num_sample > 1:\n process_data_list = []\n encoder_mask_list = []\n decoder_mask_list = []\n for _ in range(self.num_sample):\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n process_data = process_data.view(\n (self.new_length, 3) + process_data.size()[-2:]).transpose(\n 0, 1)\n process_data_list.append(process_data)\n encoder_mask_list.append(encoder_mask)\n decoder_mask_list.append(decoder_mask)\n return process_data_list, encoder_mask_list, decoder_mask_list\n else:\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n # T*C,H,W -> T,C,H,W -> C,T,H,W\n process_data = process_data.view(\n (self.new_length, 3) + 
process_data.size()[-2:]).transpose(\n 0, 1)\n return process_data, encoder_mask, decoder_mask\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, root, setting):\n if not os.path.exists(setting):\n raise (RuntimeError(\n \"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \"\n % (setting)))\n clips = []\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(' ')\n # line format: video_path, start_idx, total_frames\n if len(line_info) < 3:\n raise (RuntimeError(\n 'Video input format is not correct, missing one or more element. %s'\n % line))\n clip_path = os.path.join(root, line_info[0])\n start_idx = int(line_info[1])\n total_frame = int(line_info[2])\n item = (clip_path, start_idx, total_frame)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length +\n 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(\n list(range(self.num_segments)), average_duration)\n offsets = offsets + np.random.randint(\n average_duration, size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(\n np.random.randint(\n num_frames - self.skip_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments, ))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n def get_frame_id_list(self, duration, indices, skip_offsets):\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n return frame_id_list" } ]
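The docstrings in the context above describe the pre-training setting file: one space-separated line per sample, carrying the frame-folder or video path, a start index, and a total frame count, where a negative count marks a video file to decode and any trailing label column is simply ignored by _make_dataset. The short illustration below uses invented paths and parses the lines the same way _make_dataset does.

# Hypothetical annotation lines; the paths and counts are made up.
example_setting = (
    "videos/clip_0001.mp4 0 -1\n"      # total_frames < 0: decode the video file
    "frames/SomethingV2/42 1 96\n"     # raw-frame folder with 96 frames
)

clips = []
for line in example_setting.strip().splitlines():
    line_info = line.split(' ')
    clip_path, start_idx, total_frame = line_info[0], int(line_info[1]), int(line_info[2])
    clips.append((clip_path, start_idx, total_frame))

print(clips)
# [('videos/clip_0001.mp4', 0, -1), ('frames/SomethingV2/42', 1, 96)]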
import os from .datasets import RawFrameClsDataset, VideoClsDataset from .pretrain_datasets import ( # noqa: F401 DataAugmentationForVideoMAEv2, HybridVideoMAE, VideoMAE, )
11760
num_segments=1, num_crop=1, new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, lazy_init=False, num_sample=args.num_sample) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if is_train: mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif test_mode: mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') if args.data_set == 'Kinetics-400': if not args.sparse_sample: dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, sparse_sample=False, args=args) else: dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=1, frame_sample_rate=1, num_segment=args.num_frames, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, sparse_sample=True, args=args) nb_classes = 400 elif args.data_set == 'Kinetics-600': dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 600 elif args.data_set == 'Kinetics-700': dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 700 elif args.data_set == 'Kinetics-710': dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 710 elif args.data_set == 'SSV2':
# -------------------------------------------------------- # Based on BEiT, timm, DINO and DeiT code bases # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/facebookresearch/deit # https://github.com/facebookresearch/dino # --------------------------------------------------------' def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAEv2(args) dataset = VideoMAE( root=args.data_root, setting=args.data_path, train=True, test_mode=False, name_pattern=args.fname_tmpl, video_ext='mp4', is_color=True, modality='rgb', num_segments=1, num_crop=1, new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, lazy_init=False, num_sample=args.num_sample) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if is_train: mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif test_mode: mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') if args.data_set == 'Kinetics-400': if not args.sparse_sample: dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, sparse_sample=False, args=args) else: dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=1, frame_sample_rate=1, num_segment=args.num_frames, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, sparse_sample=True, args=args) nb_classes = 400 elif args.data_set == 'Kinetics-600': dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 600 elif args.data_set == 'Kinetics-700': dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 700 elif args.data_set == 'Kinetics-710': dataset = VideoClsDataset( anno_path=anno_path, data_root=args.data_root, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 710 elif args.data_set == 'SSV2':
dataset = RawFrameClsDataset(
0
2023-11-24 07:28:22+00:00
16k
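The build_dataset function in this record reads a handful of attributes off an argparse-style namespace. The sketch below lists those attributes for a Kinetics-400 validation split; every value and path is a placeholder, the import path is only a guess from the record's file_path, and the return shape is inferred from the nb_classes assignments rather than from shown code.

from types import SimpleNamespace

# from Video.dataset.build import build_dataset   # assumed import path

# Hypothetical values; attribute names mirror what build_dataset and
# VideoClsDataset read from `args` in the record above. The annotation CSVs
# and video paths must exist for the constructor to succeed.
args = SimpleNamespace(
    data_set='Kinetics-400',
    data_path='annotations/k400',    # build_dataset appends train.csv / val.csv
    data_root='videos/k400',
    num_frames=16,
    sampling_rate=4,
    sparse_sample=False,
    input_size=224,
    short_side_size=224,
    test_num_segment=5,
    test_num_crop=3,
    reprob=0.0,        # only consulted by the dataset classes in 'train' mode
    num_sample=1,
)

# The return statement sits below the cropped point; the nb_classes
# assignments suggest the function hands back the dataset plus class count.
result = build_dataset(is_train=False, test_mode=False, args=args)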
wenquanlu/HandRefiner
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * (1. 
- mask) + mask * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
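The context list above closes with the DDIMSampler snippet. One small helper from that list, extract_into_tensor, is reused throughout the model code below to pick the schedule coefficient for each sample's timestep and reshape it for broadcasting. A minimal self-contained sketch of what it does; the schedule buffer, timesteps and latent shape here are toy stand-ins, not the model's real configuration:

import torch

def extract_into_tensor(a, t, x_shape):
    # same logic as the snippet above: gather a[t] per batch element, reshape to (b, 1, 1, ...)
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

sqrt_alphas_cumprod = torch.linspace(1.0, 0.1, steps=1000)   # toy 1-D schedule buffer
t = torch.tensor([0, 499, 999])                              # one timestep index per batch element
x = torch.randn(3, 4, 64, 64)                                # toy latent batch

coef = extract_into_tensor(sqrt_alphas_cumprod, t, x.shape)
print(coef.shape)                                            # torch.Size([3, 1, 1, 1]), broadcasts against x
scaled = coef * x                                            # e.g. the x_start term inside q_sample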
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
12446
raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, masks, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') loss = loss * masks[:,None,:,:] else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, 
k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after 
implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if 
isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
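The next_line above calls normal_kl, but the excerpt is cut before that helper appears. Below is a minimal sketch of the closed-form KL between diagonal Gaussians that diffusion codebases typically use for this call; it is an assumption about the helper, not code from this record.

import torch

def normal_kl(mean1, logvar1, mean2, logvar2):
    # KL( N(mean1, exp(logvar1)) || N(mean2, exp(logvar2)) ), computed elementwise
    mean2 = torch.as_tensor(mean2, dtype=mean1.dtype, device=mean1.device)
    logvar2 = torch.as_tensor(logvar2, dtype=mean1.dtype, device=mean1.device)
    return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2)
                  + (mean1 - mean2) ** 2 * torch.exp(-logvar2))

# _prior_bpd would then average this result over the non-batch dimensions and
# divide by log(2) to report the prior term in bits-per-dim, as its docstring states.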
9
2023-11-24 10:19:23+00:00
16k
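The latent-diffusion excerpt in the record above prints "USING STD-RESCALING" and applies self.scale_factor symmetrically in get_first_stage_encoding and decode_first_stage. A minimal standalone sketch of that convention (not the record's code) for reference:

import torch

def compute_scale_factor(z):
    # z: latents of the first encoded batch; choose the factor so scaled latents have std ~1
    return 1.0 / z.flatten().std()

def encode_latent(z, scale_factor):
    return scale_factor * z      # what get_first_stage_encoding returns

def decode_latent_input(z, scale_factor):
    return z / scale_factor      # what decode_first_stage undoes before decoding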
eth-sri/language-model-arithmetic
src/model_arithmetic/model_arithmetic.py
[ { "identifier": "load_model", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n This function is used to load a model based on several parameters including the type of task it is targeted to perform.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n\n classification (bool): If True, loads the model for sequence classification.\n\n token_classification (bool): If True, loads the model for token classification.\n\n return_tokenizer (bool): If True, returns the tokenizer along with the model.\n\n dtype: The data type that PyTorch should use internally to store the model’s parameters and do the computation.\n\n load_dtype (bool): If False, sets dtype as torch.float32 regardless of the passed dtype value.\n\n rl (bool): If True, loads model specifically designed to be used in reinforcement learning environment.\n\n peft_config: Configuration details for Peft models. \n \n Returns:\n It returns a model for the required task along with its tokenizer, if specified.\n \"\"\"\n log(logger.debug, f\"Loading model for {dir_or_model} with {classification}, {dtype}, {load_dtype}\")\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if not load_dtype:\n dtype = torch.float32\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n\n original_model_name = model_name\n\n if classification:\n model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\") # to investigate: calling torch_dtype here fails.\n elif token_classification:\n model = AutoModelForTokenClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n elif rl:\n model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, \n peft_config=peft_config, device_map=\"auto\")\n else:\n if model_name.endswith(\"GPTQ\") or model_name.endswith(\"GGML\"):\n model = AutoGPTQForCausalLM.from_quantized(model_name,\n use_safetensors=True,\n trust_remote_code=True,\n # use_triton=True, # breaks currently, unfortunately generation time of the GPTQ model is quite slow\n quantize_config=None, device_map=\"auto\")\n else:\n model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n\n if is_lora_dir:\n model = PeftModel.from_pretrained(model, dir_or_model)\n \n try:\n tokenizer = load_tokenizer(original_model_name)\n model.config.pad_token_id = tokenizer.pad_token_id\n except Exception:\n pass\n if return_tokenizer:\n return model, load_tokenizer(original_model_name)\n return model" }, { "identifier": "load_tokenizer", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_tokenizer(dir_or_model):\n \"\"\"\n This function is used to load the tokenizer for a specific pre-trained model.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n \n Returns:\n It returns a tokenizer that 
can convert text to tokens for the specific model input.\n \"\"\"\n log(logger.debug, f\"Loading tokenizer for {dir_or_model}\")\n\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n \n if os.path.isfile(os.path.join(dir_or_model, \"config.json\")):\n loaded_json = json.load(open(os.path.join(dir_or_model, \"config.json\"), \"r\"))\n model_name = loaded_json[\"_name_or_path\"]\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if tokenizer.pad_token is None:\n log(logger.debug, \"Setting pad token to eos token\")\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n \n return tokenizer" }, { "identifier": "get_max_length", "path": "src/model_arithmetic/utils.py", "snippet": "def get_max_length(model_config, default_length=1024):\n \"\"\"\n Get the maximum length from the model configuration.\n\n Args:\n model_config (object): The model configuration object.\n default_length (int, optional): The default maximum length. Defaults to 1024.\n\n Returns:\n int: The maximum length.\n \"\"\"\n max_length = None\n for length_setting in [\"n_positions\", \"max_position_embeddings\", \"seq_length\"]:\n max_length = getattr(model_config, length_setting, None)\n if max_length:\n if ENABLE_LOGGING:\n logger.debug(f\"Found max length: {max_length}\")\n break\n if not max_length:\n max_length = default_length\n if ENABLE_LOGGING:\n logger.debug(f\"Using default max length: {max_length}\")\n\n return max_length" }, { "identifier": "ENABLE_LOGGING", "path": "src/model_arithmetic/utils.py", "snippet": "ENABLE_LOGGING = False" }, { "identifier": "log", "path": "src/model_arithmetic/utils.py", "snippet": "def log(function, message):\n \"\"\"\n Logs the given message using the provided function if logging is enabled.\n \n Parameters:\n function (callable): The logging function to use.\n message (str): The message to be logged.\n \"\"\"\n if ENABLE_LOGGING:\n function(message)" }, { "identifier": "Operator", "path": "src/model_arithmetic/operators.py", "snippet": "class Operator(BaseClass):\n def __init__(self, minimum_value=-10 ** 8, **kwargs):\n \"\"\"Initializes an operator with the given keyword arguments.\n\n Args:\n minimum_value (float, optional): The minimum value any element can have: this is important when doing calculations where several logprobs have been made -torch.inf but we still want to do meaningful computations with them.\n **kwargs: The keyword arguments.\n \"\"\"\n super().__init__(**kwargs)\n self.minimum_value = minimum_value\n \n def set_to_minimum(self, output):\n \"\"\"Sets the output to the minimum value if it is smaller than the minimum value.\n\n Args:\n output (List || torch.tensor): List or torch.tensor\n \"\"\"\n if isinstance(output, list):\n for el in range(len(output)):\n if torch.is_tensor(output[el]):\n output[el][output[el] < self.minimum_value] = self.minimum_value\n elif torch.is_tensor(output):\n output[output < self.minimum_value] = self.minimum_value\n return output\n \n def evaluate(self, runnable_operator_outputs : Dict, normalize : bool = True):\n \"\"\"Evaluates the given object in the formula based on the language model outputs\n\n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise 
NotImplementedError\n\n def clone(self):\n \"\"\"Creates a deep copy of the object.\n\n Returns:\n A deep copy of the object.\n \"\"\"\n return copy.deepcopy(self)\n\n def norm(self, runnable_operator_outputs : Dict = None):\n \"\"\"Returns the norm of the object\n \n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n \n def runnable_operators(self):\n \"\"\"Returns the Runnable Operators in the object\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n\n def is_finished(self, runnable_operator_outputs : Dict) -> bool:\n \"\"\"Returns whether the object is finished\n\n Args:\n runnable_operator_outputs (Dict): Maps Runnable Operators to their outputs\n\n Raises:\n NotImplementedError\n \"\"\"\n raise NotImplementedError\n\n def normalize(self, output, runnable_operator_outputs : Dict):\n \"\"\"\n Normalizes the output of the operator\n \n Args:\n output (torch.tensor || float): The output of the operator\n runnable_operator_outputs (Dict): The outputs of the runnable operators\n \"\"\"\n norm = self.norm(runnable_operator_outputs)\n if (torch.is_tensor(norm) and torch.count_nonzero(norm == 0) > 0) or (not torch.is_tensor(norm) and norm == 0):\n return output\n if not torch.is_tensor(output):\n return output\n output /= norm\n output -= torch.logsumexp(output, dim=-1, keepdim=True)\n return output\n\n\n def __add__(self, other):\n if isinstance(other, (float, int)):\n return Sum([self, Constant(other)])\n return Sum([self, other])\n\n def __radd__(self, other):\n return self.__add__(other)\n \n def __multiply__(self, other):\n if isinstance(other, (float, int)):\n return Product([self, Constant(other)])\n return Product([self, other])\n\n def __div__(self, other):\n if isinstance(other, (float, int)):\n return Product([self, Constant(1 / other)])\n raise NotImplementedError\n\n def __rdiv__(self, other):\n raise NotImplementedError\n\n def __sub__(self, other):\n return self.__add__(-other)\n\n def __neg__(self):\n return self.__multiply__(-1)\n\n def __rmultiply__(self, other):\n return self.__multiply__(other)\n\n def __mul__(self, other):\n return self.__multiply__(other)\n\n def __rmul__(self, other):\n return self.__multiply__(other)\n\n def __rsub__(self, other):\n self_ = self.__neg__()\n return self_.__add__(other)\n \n def __str__(self):\n return f\"{self.__class__.__name__}({self.kwargs})\"" }, { "identifier": "Monitor", "path": "src/model_arithmetic/monitor.py", "snippet": "class Monitor(MultipleMonitor):\n \"\"\"\n Final monitor object that keeps track of values for runnable operators, but also for the whole formula\n \"\"\"\n def __init__(self, runnable_operators):\n \"\"\"\n Initialize the Monitor object.\n \n Args:\n runnable_operators(List[RunnableOperator]): A list of runnable operators.\n \"\"\"\n super().__init__(models_monitor=ModelsMonitor(runnable_operators))\n \n def pop_results(self, n=1, runnable_operator=None, indicator=None):\n \"\"\"Pop results from the monitor.\n\n Args:\n n (int, optional): Number of elements to pop. Defaults to 1.\n runnable_operator (RunnableOperator, optional): From which ModelMonitor to pop the results. Defaults to None.\n indicator (string, optional): Name of the type to pop. 
Defaults to None.\n \"\"\"\n if runnable_operator is None:\n super().pop_results(n, indicator=indicator)\n else:\n self.models_monitor.pop_results(n, runnable_operator, indicator=indicator)\n \n def merge(self, other):\n \"\"\"\n Merge the elements of another Monitor object with the elements of this object.\n Args:\n other (Monitor): The other Monitor object.\n \"\"\"\n super().merge(other)\n self.models_monitor.merge(other.models_monitor)\n \n def add_result(self, element, runnable_operator=None, indicator=None):\n \"\"\"\n Add a result to the monitor.\n Args:\n element (float): The result to be added.\n runnable_operator (RunnableOperator): The runnable operator associated with the result.\n indicator (string, optional): The name of the time type.\n \"\"\"\n if runnable_operator is None:\n super().add_result(element, indicator=indicator)\n else:\n self.models_monitor.add_result(element, runnable_operator, indicator=indicator)\n \n def get_store_settings(self):\n \"\"\"\n Gets the store settings of the parent class and the models monitor.\n \"\"\"\n sum_vals = [monitor.total() for monitor in self.models_monitor.monitors.values()]\n if len(sum_vals) > 0:\n total_time_no_model_calls = self.total() - sum(sum_vals)\n else:\n total_time_no_model_calls = self.total()\n\n return {\n **super().get_store_settings(),\n \"total_time_no_model_calls\": total_time_no_model_calls,\n \"models_monitor\": self.models_monitor.get_store_settings()\n }" }, { "identifier": "RunnableOperator", "path": "src/model_arithmetic/runnable_operators.py", "snippet": "class RunnableOperator(Operator):\n def __init__(self, prompt_string=\"\", model=None, speculative_factor=1, \n prompt_template = lambda prompt_string, input_string: prompt_string + input_string, run_priority=0, group=None, \n outputs_logprobs=True, **kwargs):\n \"\"\"\n Initialize a runnable operator instance. A runnable operator is an operator that generates a probability distribution instead of modifies an existing one.\n \n Args:\n prompt_string (str): String to be used as a prompt. Only used in specific runnable operators\n model (optional): Model to be used for operation. If None, the model must be set later to the default model to be used.\n speculative_factor (int): Factor for speculative sampling.\n prompt_template (callable): Function for generating prompt. Takes two arguments: prompt_string and input_string. The operator will be run on prompt_template(..., ...) + continuation_tokens\n run_priority (int): Priority for running the operation. Higher priority means the operation will be run first, especially important for the classifier.\n group (optional): Group to which the operator belongs. 
This ensures that speculative sampling will not be tried when not all operators of a group are finished.\n outputs_logprobs (bool): Whether the operator outputs logprobs.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n super().__init__(speculative_factor=speculative_factor, model=model, prompt_string=prompt_string,\n prompt_template=prompt_template, run_priority=run_priority, group=group, outputs_logprobs=outputs_logprobs, **kwargs)\n self.cache = None\n \n def run_condition(self, new_tokens, trigger_end):\n \"\"\"\n Determine if the run condition is met.\n \n Args:\n new_tokens (List[int]): Number of new tokens per sample in the batch\n trigger_end (List[bool]): Whether to trigger the end for each sample in the batch.\n \n Returns:\n bool: Whether the run condition is met.\n \"\"\"\n new_tokens = [new_tokens[i] if not trigger_end[i] or new_tokens[i] < 0 else max(new_tokens[i], self.speculative_factor) for i in range(len(new_tokens))]\n return np.mean(new_tokens) >= self.speculative_factor \n # other possibility:\n # return np.max(new_tokens) + 1 >= speculative_factor\n \n def delete_cache(self, index=None, from_=None):\n \"\"\"\n Delete the cache.\n \"\"\"\n if from_ is None and index is None:\n self.cache = None\n \n def run(self, tokenized_inputs, **kwargs):\n \"\"\"\n Run the operation. This method needs to be implemented by subclasses.\n \n Args:\n tokenized_inputs (torch.tensor): Inputs that have been tokenized.\n **kwargs: Arbitrary keyword arguments.\n \n Raises:\n NotImplementedError: This method needs to be implemented by subclasses.\n \"\"\"\n raise NotImplementedError(\"This method needs to be implemented by subclasses.\")\n \n def runnable_operators(self):\n \"\"\"\n Get a list of runnable operators used by the operator, usually only this operator itself.\n \n Returns:\n list: List of runnable operators.\n \"\"\"\n return [self]\n \n def same_operator(self, other):\n \"\"\"\n Determine if the other operator is the same as this one. 
This is important to avoid redundant runs of the same operator in a formula\n \n Args:\n other: Other operator to be compared.\n \n Returns:\n bool: Whether the other operator is the same as this one.\n \"\"\"\n if isinstance(other, str):\n return self.id() == other\n elif isinstance(other, RunnableOperator):\n return self.id() == other.id()\n return False\n\n def norm(self, runnable_operator_outputs=None):\n \"\"\"\n Compute the norm of the operator.\n \n Args:\n runnable_operator_outputs (optional): Outputs of runnable operators.\n \n Returns:\n int: The norm of the operator.\n \"\"\"\n if runnable_operator_outputs is None or self.is_finished(runnable_operator_outputs):\n return 1\n return 0\n \n def is_finished(self, runnable_operator_outputs):\n \"\"\"\n Determine if the operation is finished.\n \n Args:\n runnable_operator_outputs: Outputs of runnable operators.\n \n Returns:\n bool: Whether the operation is finished.\n \"\"\"\n return any([self.same_operator(output) and runnable_operator_outputs[output] is not None for output in runnable_operator_outputs])\n \n def evaluate(self, runnable_operator_outputs : Dict, normalize : bool = True):\n \"\"\"\n Evaluate the operation.\n \n Args:\n runnable_operator_outputs (Dict): Outputs of runnable operators.\n normalize (bool): Whether to normalize the evaluation.\n \n Returns:\n int: The evaluation of the operation.\n \"\"\"\n for output in runnable_operator_outputs:\n if self.same_operator(output) and runnable_operator_outputs[output] is not None:\n return runnable_operator_outputs[output]\n return 0\n \n def generate_settings(self):\n \"\"\"\n Generate settings for the operation.\n \n Returns:\n dict: Settings for the operation.\n \"\"\"\n kwargs = super().generate_settings()\n kwargs[\"prompt_template\"] = self.prompt_template(\"{{prompt_string}}\", \"{{input_string}}\")\n return kwargs\n\n @staticmethod\n def load_from_settings(settings):\n \"\"\"\n Load operator from settings.\n \n Args:\n settings (dict): Settings for the operation.\n \n Returns:\n Operator: Operator loaded from settings.\n \"\"\"\n copy = settings[\"prompt_template\"]\n prompt_template = lambda prompt_string, input_string: copy.replace(\"{{prompt_string}}\", prompt_string).replace(\"{{input_string}}\", input_string)\n settings[\"prompt_template\"] = prompt_template\n return Operator.load_from_settings(settings)\n \n def get_prompt(self, input_string):\n \"\"\"\n Get the prompt for the operation.\n \n Args:\n input_string (str): String to be used as input.\n \n Returns:\n callable: Function for generating prompt.\n \"\"\"\n return self.prompt_template(self.prompt_string, input_string)\n \n def get_store_params(self):\n \"\"\"\n Get parameters for storing the operation.\n \n Returns:\n dict: Parameters for storing the operation.\n \"\"\"\n return {\n \"class\": self.__class__.__name__,\n \"model\": self.model,\n \"speculative_factor\": self.speculative_factor,\n \"prompt_template\": self.prompt_template(self.prompt_string, \"{{input_string}}\")\n }\n \n def id(self):\n \"\"\"\n Get the ID of the operation.\n \n Returns:\n str: ID of the operation.\n \"\"\"\n kwargs = self.kwargs.copy()\n kwargs[\"prompt_template\"] = self.prompt_template(self.prompt_string, \"{{input_string}}\")\n return f\"{self.__class__.__name__}(**{kwargs})\"\n \n def load_model(self, dtype):\n \"\"\"\n Load the model for the operation. 
Only needs to be overwritten when a model is necessary\n \n Args:\n dtype: Data type for the model.\n \n Returns:\n None\n \"\"\"\n return None\n \n def initialize_after_model_set(self):\n \"\"\"\n Initialize the operation after the model is set (to the default model if necessary).\n \n Raises:\n AssertionError: If the model is not set before initializing.\n \"\"\"\n assert self.model is not None, \"Model must be set before initializing.\"" }, { "identifier": "PromptedLLM", "path": "src/model_arithmetic/runnable_operators.py", "snippet": "class PromptedLLM(RunnableOperator):\n def __init__(self, prompt_string, model=None, speculative_factor=1, \n prompt_template = lambda prompt_string, input_string, : prompt_string + \"\\n\" + input_string, dtype=None, group=None,\n enable_cache=True, dim_keys_past=2, dim_values_past=2, run_eager=False, tokenizer=None, **kwargs):\n \"\"\"\n Initializes an LLM Prompt. This is a runnable operator that uses a language model to generate a probability distribution.\n Args:\n prompt_string (str): String to be used as a prompt. Only used in specific runnable operators\n model (optional): Model to be used for operation. If None, the model must be set later to the default model to be used.\n speculative_factor (int): Factor for speculative sampling.\n prompt_template (callable): Function for generating prompt. Takes two arguments: prompt_string and input_string. The operator will be run on prompt_template(..., ...) + continuation_tokens\n run_priority (int): Priority for running the operation. Higher priority means the operation will be run first, especially important for the classifier.\n dtype (optional): Data type for the model.\n group (optional): Group to which the operator belongs. This ensures that speculative sampling will not be tried when not all operators of a group are finished.\n enable_cache (bool): Whether to enable the key-value cache.\n dim_keys_past (int): Dimension of the keys in the key-value cache. Usually 2, but for other models this can be different.\n dim_values_past (int): Dimension of the values in the key-value cache. Usually 2, but for other models this can be different.\n run_eager (bool): Whether to run the model in eager mode. This is necessary for some models, but incompatible with speculative sampling and some other features.\n tokenizer (Tokenizer): Tokenizer to be used for the operation. If None, the default tokenizer will be used.\n **kwargs: Arbitrary keyword arguments.\n \"\"\"\n if dim_keys_past == 2 and dim_values_past == 2:\n # set the dims based on the model\n if model in [\"tiiuae/falcon-7b\", \"tiiuae/falcon-7b-instruct\", \"tiiuae/falcon-40b\", \"tiiuae/falcon-40b-instruct\"]:\n dim_keys_past = 1\n dim_values_past = 1\n \n super().__init__(prompt_string=prompt_string, model=model, speculative_factor=speculative_factor, \n prompt_template=prompt_template, group=group, enable_cache=enable_cache, \n dim_keys_past=dim_keys_past, dim_values_past=dim_values_past, run_eager=run_eager)\n self.dtype = dtype\n self.tokenizer_length = None\n self.tokenizer = tokenizer\n self.previous_input_ids = None\n self.default_dim = 2\n if self.run_eager:\n log(logger.warning, \"Eager mode is enabled. 
This will make several features, such as speculative sampling, inaccessible.\")\n \n def load_model(self, dtype):\n \"\"\"\n Loads the model for the operation.\n :param dtype: Data type for the model.\n \"\"\"\n if not isinstance(self.model, str):\n return self.model\n if self.dtype is None:\n return load_model(self.model, dtype=dtype)\n return load_model(self.model, dtype=self.dtype)\n \n def initialize_after_model_set(self):\n if self.tokenizer is None:\n tokenizer = load_tokenizer(self.model)\n self.tokenizer_length = len(tokenizer)\n \n def select_from_sample_cache(self, sample, from_=None, until=None):\n \"\"\"Selects the cache from a sample that needs to be stored\n\n Args:\n sample (torch.tensor): Torch tensor, the samples key-value past as stored by the LLM\n from_ (int, optional): From which value to store the key-value past. Defaults to None.\n until (int, optional): Until which value to store the key-value past. Defaults to None.\n \"\"\"\n for i in range(len(sample)):\n for j in range(len(sample[i])):\n sample[i][j] = sample[i][j][:, from_:until]\n \n return sample\n \n def swap_dimensions(self, sample):\n \"\"\"Swaps dimensions in order to make the dimensions match the default dimensions. This is necessary because models do not use the same semantics for the key-value storage\n\n Args:\n sample (List[torch.tensor]): Key-value past as stored by the LLM\n \"\"\"\n for i in range(len(sample)):\n # keys, values\n if self.default_dim != self.dim_keys_past:\n sample[i][0] = sample[i][0].transpose(self.default_dim - 1, self.dim_keys_past - 1)\n if self.default_dim != self.dim_values_past:\n sample[i][1] = sample[i][1].transpose(self.default_dim - 1, self.dim_values_past - 1)\n \n return sample\n \n def select_sample_cache(self, cache, sample_index):\n \"\"\"Stores the key value past by selecting the sample index from the cache and storing them in a list\n\n Args:\n cache (List[torch.tensor]): Key-value cache as returned by the model\n sample_index (int): Which sample to select\n \"\"\"\n sample = []\n for i in range(len(cache)):\n sample.append([\n cache[i][0][sample_index],\n cache[i][1][sample_index]\n ])\n sample = self.swap_dimensions(sample)\n return sample\n \n def pad_sample(self, sample, target_size):\n \"\"\"Pads all samples key-value cache to a specific size\n\n Args:\n sample (torch.tensor): Key-value cache as stored by the LLM\n target_size (int): Target size\n \"\"\"\n for i in range(len(sample)):\n for j in range(len(sample[i])):\n pad_size = target_size - sample[i][j].size(1)\n pad = (0, 0, pad_size, 0)\n if pad_size > 0:\n sample[i][j] = torch.nn.functional.pad(sample[i][j], pad, \"constant\", 0)\n elif pad_size < 0:\n sample[i][j] = sample[i][j][:, :target_size]\n return sample\n \n def stack_samples(self, samples):\n \"\"\"Stacks the samples key-value cache by removing the List dimension and reordering to be appropriate for storing\n\n Args:\n samples (List[torch.tensor]): Key-value cache as returend by the model\n \"\"\"\n stacked_samples = []\n for i in range(len(samples[0])):\n stacked_mult = []\n for j in range(len(samples[0][i])):\n stacked = torch.stack(\n [samples[k][i][j] for k in range(len(samples))], dim=0\n )\n stacked_mult.append(stacked)\n stacked_samples.append(stacked_mult)\n return stacked_samples\n \n def store_cache(self, past_key_values, input_ids, lengths):\n \"\"\"Stores the past key values returned by the model in an appropriate way\n\n Args:\n past_key_values (List[torch.tensor]): Tensor in which the key values where reutrned\n input_ids 
(torch.tensor): Input ids\n lengths (List[int]): Length of each sample in the batch\n \"\"\"\n if self.run_eager:\n self.cache = past_key_values\n return\n self.cache = []\n self.previous_input_ids = []\n for i, length in enumerate(lengths):\n self.cache.append(\n self.select_from_sample_cache(self.select_sample_cache(past_key_values, i), from_=-length)\n )\n self.previous_input_ids.append(\n input_ids[i, -length:]\n )\n def common_starting_elements(self, t1, t2):\n \"\"\"Check for the common starting elements in two tensors\n\n Args:\n t1 (torch.tensor): First Tensor\n t2 (torch.tensor): Second Tensor\n \"\"\"\n min_length = min(t1.size(0), t2.size(0))\n eq = torch.eq(t1[:min_length], t2[:min_length])\n if not eq.any():\n return 0\n if eq.all():\n return min_length\n\n return torch.where(eq == 0)[0][0].item()\n \n def delete_previous_cache(self, new_input_ids, lengths):\n \"\"\"Deletes previous cache by only keeping the common elements between the previous input ids and the new input ids\n\n Args:\n new_input_ids (torch.tensor): New input ids\n lengths (List[int]): List of lengths\n \"\"\"\n if self.run_eager:\n return\n input_ids = [\n new_input_ids[i, -lengths[i]:] for i in range(len(lengths))\n ]\n elements = [self.common_starting_elements(input_ids[i], self.previous_input_ids[i]) for i in range(len(lengths))]\n self.cache = [\n self.select_from_sample_cache(self.cache[i], until=elements[i]) for i in range(len(lengths))\n ]\n \n \n def prepare_inputs(self, input_ids, attention_mask, n_new_tokens):\n \"\"\"Prepares the inputs for the model\n\n Args:\n input_ids (torch.tensor): Input ids\n attention_mask (torch.tensor): Attention Mask\n n_new_tokens (int): Number of new tokens since last run\n \"\"\"\n max_new_tokens = max(n_new_tokens)\n past_key_values = None\n if self.cache is not None and self.enable_cache:\n input_ids = input_ids[:, -max_new_tokens:]\n if self.run_eager:\n past_key_values = self.cache\n else:\n past_key_values = self.pad_cache(\n [self.select_from_sample_cache(self.cache[i], until=-max_new_tokens + n_new_tokens[i]) if max_new_tokens > n_new_tokens[i] else self.cache[i]\n for i in range(len(n_new_tokens))],\n attention_mask.shape[1] - max_new_tokens\n )\n return {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": True,\n \"past_key_values\": past_key_values\n }\n \n def pad_cache(self, cache, length):\n \"\"\"Pads the cache and prepares them for the model\n\n Args:\n cache (torch.tensor): Key-value cache as stored by the LLM\n lengths (List[int]): List of lengths\n \"\"\"\n for i in range(len(cache)):\n cache[i] = self.pad_sample(cache[i], length)\n cache[i] = self.swap_dimensions(cache[i])\n stacked_samples = self.stack_samples(cache)\n\n return stacked_samples\n \n def delete_cache(self, index=None, from_=None):\n \"\"\"Deletes all cache\n\n Args:\n index (int, optional): _description_. Defaults to None.\n from_ (int, optional): _description_. 
Defaults to None.\n \"\"\"\n # if index is not None and self.cache is not None:\n # self.previous_input_ids = self.previous_input_ids[:index] + self.previous_input_ids[index + 1:]\n # cache_shape = list(self.cache[0].shape)\n # device = self.cache[0].device\n # dtype = self.cache[0].dtype\n # cache_shape[-2] = 0\n # self.cache = self.cache[:index] + self.cache[index + 1:]\n # self.previous_input_ids.append(torch.tensor([]))\n # self.cache.append(torch.tensor([], device=device, dtype=dtype).reshape(cache_shape))\n # return\n # else:\n self.previous_input_ids = None\n self.cache = None\n\n def run(self, tokenized_inputs, loaded_models, model_new_tokens, use_cache, **kwargs):\n \"\"\"\n Runs the model on the tokenized inputs.\n Args:\n tokenized_inputs (torch.tensor): Inputs that have been tokenized.\n loaded_models (dict[PreTrainedModel]): Models that have been loaded. The model for this operation is in loaded_models[self.model]\n model_new_tokens (List[int]): Number of new tokens per sample in the batch\n use_cache (bool): Whether to use the key-value cache.\n \"\"\"\n if isinstance(self.model, str):\n model = loaded_models[self.model]\n else:\n model = self.model\n lengths = torch.sum(tokenized_inputs.attention_mask, dim=-1)\n if self.cache is not None and self.enable_cache and use_cache:\n self.delete_previous_cache(tokenized_inputs.input_ids, lengths)\n \n # if self.cache is not None:\n # length_common_input_ids_per_sample = [\n \n # ]\n actual_inputs = self.prepare_inputs(input_ids=tokenized_inputs.input_ids.to(model.device),\n attention_mask=tokenized_inputs.attention_mask.to(model.device),\n n_new_tokens=model_new_tokens)\n # run model \n with torch.no_grad():\n try:\n model_output = model(**actual_inputs, return_dict=True)\n except RuntimeError as e:\n raise RuntimeError(f\"Error thrown when running model. This is probably caused because the model handles the key-value cache differently. Consider setting dim_values_past and dim_keys_past values or disabling the key-value cache. Alternatively, you can set run_eager=True, but this feature is incompatible with speculative sampling and some other features.\")\n logprobs = torch.log_softmax(model_output.logits[:, :, :self.tokenizer_length], dim=-1)\n \n if self.enable_cache and use_cache:\n self.store_cache(model_output.past_key_values, tokenized_inputs.input_ids, lengths)\n \n logprobs = [logprobs[i, -model_new_tokens[i] : ].to(torch.float32) for i in range(logprobs.shape[0])]\n return logprobs\n\n def __str__(self):\n return f\"PromptedLLM('{self.prompt_string}', model='{self.model}')\"" }, { "identifier": "TokenizedInput", "path": "src/model_arithmetic/input.py", "snippet": "class TokenizedInput:\n \"\"\"\n Keeps track of the tokenized input of a runnable operator. 
Automatically sets the correct tokens, by using the runnable operator's get_prompt method.\n \"\"\"\n def __init__(self, runnable_operator, model_name, model_config, tokenizer):\n \"\"\"\n Initialize the TokenizedInput object.\n\n Args:\n runnable_operator (RunnableOperator): An object that provides a get_prompt method.\n model_name (str): The name of the model.\n model_config (object): The configuration of the model.\n tokenizer (object): The tokenizer to be used.\n \"\"\"\n self.runnable_operator = runnable_operator\n self.input_tokens = []\n self.only_input_tokens = None\n self.tokenizer = tokenizer\n self.max_length = get_max_length(model_config)\n self.set_inputs([\"\"])\n # this is essentially what huggingface also does, but it is kinda hidden in their sample code (GenerationMixin.generate)\n self.tokenizer.padding_side = \"left\"\n \n def extend_batch_size(self, batch_size):\n \"\"\"\n Extend the size of the batch to the given size. If the current size is less than the given size, \n the first element is repeated to fill the batch.\n \n Necessary for compatibility with lm_eval\n\n Args:\n batch_size (int): The desired batch size.\n \"\"\"\n if len(self.input_tokens) != batch_size:\n self.input_tokens = [self.input_tokens[0]] * batch_size\n \n def set_inputs(self, inputs):\n \"\"\"\n Set the inputs for the TokenizedInput object.\n\n Args:\n inputs (list): A list of input strings.\n \"\"\"\n self.input_tokens = [self.runnable_operator.get_prompt(input_string) for input_string in inputs]\n bos_token = \"\"\n if self.tokenizer.bos_token_id is not None:\n self.input_tokens = [\n [self.tokenizer.bos_token_id] + self.tokenizer(input_string, truncation=True, max_length=self.max_length, add_special_tokens=False).input_ids\n for input_string in self.input_tokens\n ]\n bos_token = self.tokenizer.bos_token\n else:\n self.input_tokens = [\n self.tokenizer(input_string, truncation=True, max_length=self.max_length, add_special_tokens=False).input_ids\n for input_string in self.input_tokens\n ]\n \n only_prompt = [bos_token + self.runnable_operator.get_prompt(\"\")]\n self.only_input_tokens = self.tokenizer(only_prompt, padding=True, return_tensors=\"pt\", truncation=True, max_length=self.max_length, add_special_tokens=False)\n \n if \"token_type_ids\" in self.only_input_tokens:\n del self.only_input_tokens[\"token_type_ids\"]\n \n def get_only_input_tokens(self):\n \"\"\"\n Get the input tokens without any continuation tokens.\n\n Returns:\n object: The input tokens without any continuation tokens.\n \"\"\"\n return self.only_input_tokens\n \n def add_continuation_tokens(self, tokens):\n \"\"\"\n Add continuation tokens to the input tokens.\n\n Args:\n tokens (list): A list of continuation tokens.\n\n Returns:\n object: The input tokens with the continuation tokens added.\n \"\"\"\n output = [\n input_token + token for input_token, token in zip(self.input_tokens, tokens)\n ]\n truncated_output = [\n output[:self.max_length] for output in output\n ]\n padded_output = self.tokenizer.pad({\"input_ids\": truncated_output}, padding=True, return_tensors=\"pt\")\n return padded_output" }, { "identifier": "Compatibility", "path": "src/model_arithmetic/lm_eval_compatibility.py", "snippet": "class Compatibility:\n \"\"\"Compatibility class to allow the use of LM eval. Main compatibility issue is that lm eval does not allow to distinguish between the input tokens and the continuation tokens. 
This class fixes this manually by going\n through the task inputs and finding the one that matches the input tokens.\n \"\"\"\n def __init__(\n self,\n task_name,\n needs_input_tokens_lm_eval,\n tokenizer,\n device,\n max_length,\n ): \n \n \"\"\"Initializes the compatibility class.\n \n Args:\n task_name (str): Name of the task.\n needs_input_tokens_lm_eval (bool): Whether the task needs the input tokens or not. If it does, the program will try to find the input tokens in the task inputs.\n tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase): Tokenizer to be used.\n device (torch.device): Device to be used.\n max_length (int): Maximum length of the input tokens.\n \"\"\"\n self.task_name = task_name\n self.needs_input_tokens_lm_eval = needs_input_tokens_lm_eval\n self.tokenizer = tokenizer\n self.task_inputs = []\n self.device = device\n self.task_initialized = False\n self.max_length = max_length\n \n def initialize_task(self):\n \"\"\"Initializes the task. Looks up all the task inputs and stores them in a list. Gets encoded inputs along with the input length\n \"\"\"\n if self.task_initialized:\n return\n self.task_initialized = True\n self.task_inputs = []\n task = get_task(self.task_name)()\n \n if task.has_test_docs():\n task_doc_func = task.test_docs\n elif task.has_validation_docs():\n task_doc_func = task.validation_docs\n \n dataset = pd.DataFrame(task_doc_func())\n rnd = random.Random()\n rnd.seed(42)\n list_indices = list(range(len(dataset)))\n rnd.shuffle(list_indices)\n dataset = dataset.iloc[list_indices]\n # rnd.shuffle(dataset)\n \n for index in range(len(dataset)):\n doc = dict(dataset.iloc[index])\n ctx = task.fewshot_context(\n doc=doc, num_fewshot=0, rnd=rnd, description=\"\"\n )\n requests = task.construct_requests(doc, ctx)\n input_ = task.doc_to_text(doc)\n input_encoded = self.tokenizer(input_, return_tensors=\"pt\", truncation=True, max_length=self.max_length).input_ids[0]\n for request in requests:\n task_input = self.tokenizer(\"\".join(request.args), return_tensors=\"pt\", truncation=True, max_length=self.max_length).input_ids.to(self.device)[0]\n task_input_length = len(input_encoded)\n # double encoding decoding is necessary for the llama tokenizer (for example, a \"...\" got an extra space in front of it if you don't do this)\n self.task_inputs.append((task_input, len(task_input) - task_input_length, self.tokenizer.decode(task_input[:-1])))\n \n def is_target(self, input_tokens, task_input):\n \"\"\"Checks whether the input tokens are the target tokens starting from the end of the input tokens.\n\n Args:\n input_tokens (torch.tensor): Input tokens\n task_input (torch.tensor): Task Input Tokens\n \"\"\"\n return torch.all(input_tokens[-len(task_input):] == task_input)\n \n def find_in_task(self, input_tokens):\n \"\"\"Finds the input tokens in the task inputs. 
First does an exact match and then a fuzzy match if the exact match came up empty .\n\n Args:\n input_tokens (torch.tensor): Input Tokens\n \"\"\"\n if not self.task_initialized:\n self.initialize_task()\n \n decoded = self.tokenizer.decode(input_tokens)\n for i in range(len(self.task_inputs)):\n guess = self.task_inputs[i][2]\n if guess in decoded:\n return self.task_inputs[i]\n fuzzes = []\n for i in range(len(self.task_inputs)):\n guess = self.task_inputs[i][2]\n fuzzes.append(fuzz.partial_ratio(guess, decoded))\n\n return self.task_inputs[fuzzes.index(max(fuzzes))]\n \n def forward_preprocessing(self, input_ids, model_input_tokens, **kwargs):\n \"\"\"Implements the main preprocessing step. This is necessary to be able to use lm-evaluation-harness. This function finds the input tokens in the task inputs and then extends the batch size of the model input tokens\n\n Args:\n input_ids (torch.tensor): Input ids\n model_input_tokens (Input): Input classes to be used for the various models in the Model Arithmetic class\n \"\"\"\n ### this is a bit cheeky, but in order to be compatible with lm-evaluation-harness, we need to implement this method\n if not isinstance(input_ids, list):\n continuation_tokens = input_ids.tolist()\n else:\n continuation_tokens = input_ids\n \n # necessary for no context\n if self.needs_input_tokens_lm_eval and get_task is not None:\n inputs = []\n continuation_tokens = []\n for i in range(len(input_ids)):\n task_element = self.find_in_task(input_ids[i])\n if task_element[1] > 1:\n inputs.append(self.tokenizer.decode(input_ids[i][:-task_element[1] + 1]))\n continuation_tokens.append(input_ids[i][-task_element[1] + 1:].tolist())\n else:\n inputs.append(self.tokenizer.decode(input_ids[i]))\n continuation_tokens.append([])\n \n for runnable_operator_id in model_input_tokens:\n model_input_tokens[runnable_operator_id].extend_batch_size(len(continuation_tokens))\n model_input_tokens[runnable_operator_id].set_inputs(inputs)\n else: \n for runnable_operator_id in model_input_tokens:\n model_input_tokens[runnable_operator_id].extend_batch_size(len(continuation_tokens))\n \n return continuation_tokens\n \n def forward_post_processing(self, logprobs, input_shape):\n \"\"\"Does some small post processing steps to make sure the correct shape is returned for the logprobs.\n\n Args:\n logprobs (torch.tensor): Returned logprobs\n input_shape (torch.tensor): The shape of the input tokens\n \"\"\"\n if self.needs_input_tokens_lm_eval:\n if torch.is_tensor(logprobs) and len(logprobs.shape) == 3 and logprobs.shape[1] != input_shape[1]:\n # set the output to the correct shape, by adding zeros in the beggining in the first axis\n logprobs = torch.cat([torch.zeros((logprobs.shape[0], input_shape[1] - logprobs.shape[1], logprobs.shape[2]), device=logprobs.device), logprobs], dim=1)\n \n return logprobs" } ]
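The Operator and PromptedLLM snippets in the context above define formula building through operator overloading (__add__, __sub__, __mul__ produce Sum/Product trees). A hypothetical composition using only the constructors and overloads shown there; the model name and weights are placeholders, and the package-level import path is an assumption:

from model_arithmetic import ModelArithmetic, PromptedLLM  # assumed package-level exports

helpful = PromptedLLM("You are a helpful assistant.", model="gpt2")
harsh = PromptedLLM("You are a rude assistant.", model="gpt2")

# __add__/__sub__/__mul__ build a Sum/Product tree that ModelArithmetic evaluates per token
formula = helpful + 0.4 * (helpful - harsh)

ma = ModelArithmetic(formula, default_model="gpt2")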
from transformers import PreTrainedModel from .basic_model_loader import load_model, load_tokenizer from .utils import get_max_length, ENABLE_LOGGING, log from collections import namedtuple from transformers import top_k_top_p_filtering from loguru import logger from .operators import Operator from .monitor import Monitor from .runnable_operators import RunnableOperator, PromptedLLM from .input import TokenizedInput from .lm_eval_compatibility import Compatibility import json import numpy as np import torch import os import time import random
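For completeness, a hypothetical use of the loader helpers whose snippets appear in the context above; the model name is a placeholder and the absolute import path is an assumption (inside this file they are imported relatively, as shown in the import statement):

import torch
from model_arithmetic.basic_model_loader import load_model, load_tokenizer  # assumed absolute path

model = load_model("gpt2", dtype=torch.bfloat16)   # default causal-LM branch of load_model
tokenizer = load_tokenizer("gpt2")                 # falls back to eos as pad token if none is set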
13,708
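The cropped_code that follows is mostly speculative-sampling bookkeeping. For orientation, a minimal sketch of the accept/resample rule it applies; the class's own speculation_sample method, shown later in this record, is the authoritative version:

import torch

def speculative_accept(token, previous_probs, new_probs, epsilon=1e-12):
    # accept the drafted token with probability min(1, q(token) / p(token))
    acceptance_prob = torch.minimum(
        torch.tensor(1.0), new_probs[token] / (previous_probs[token] + epsilon)
    )
    if torch.rand(1) < acceptance_prob:
        return token, True
    # otherwise resample from the renormalized positive part of (q - p)
    residual = torch.relu(new_probs - previous_probs)
    residual = residual / residual.sum()
    return torch.multinomial(residual, 1).item(), False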
do_speculation_sample = False # speculation sampling not needed if all models have not been run yet: this is the first model on this token if all([logprob is None for logprob in self.model_prediction_history[i][n_token].values()]): do_speculation_sample = False # This means that this token was already fully accepted, so we can just continue (can happen if batch_size > 1 or when end is triggered) if self.max_index_prediction_history(i) > n_token: continue # add the new model logprobs self.model_prediction_history[i][n_token][runnable_operator.id()] = new_model_logprobs[i][-n_generated_tokens + n_token + num_new_tokens[i] - 1] group_model_history = self.group_model_history(self.model_prediction_history[i][n_token]) # group_model_history needs to be separately checked, since it could be that the group is not yet fully calculated # also allow no logprobs runnable operators (would lead to errors) if the formula is not finished yet (if it is finished, you need to) if all([logprob is None for logprob in group_model_history.values()]) or (not runnable_operator.outputs_logprobs and not self.formula.is_finished(group_model_history)): continue # process the logprobs new_model_probs = self.process_logprobs(group_model_history) if self.intermediate_argmax and not self.formula.is_finished(group_model_history): argmax_el = torch.argmax(new_model_probs) new_model_probs = torch.zeros_like(new_model_probs) new_model_probs[argmax_el] = 1.0 if do_speculation_sample: if self.calculate_statistics: self.monitor.add_result(self.expected_acceptance_prob(self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), self.create_sample_logprobs(self.logprobs_history[i].get(n_token), temperature, top_k, top_p)), indicator="expected_acceptance_prob", runnable_operator=runnable_operator) new_token, kept = self.speculation_sample( token = generated_tokens[i][n_token], previous_models_probs=self.create_sample_logprobs(self.logprobs_history[i][n_token], temperature, top_k, top_p), new_models_probs=self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), ) if n_token in self.model_prediction_history[i]: self.logprobs_history[i][n_token] = new_model_probs if not kept: # if not kept, we change the generated tokens and remove the model prediction history after that token generated_tokens[i][n_token] = new_token generated_tokens[i] = generated_tokens[i][:n_token + 1] self.clear_model_prediction_history(i, generated_tokens[i], from_=n_token) self.trigger_end[i] = False elif n_token in self.model_prediction_history[i]: self.logprobs_history[i][n_token] = new_model_probs if not kept: break all_kept.append(kept) return all_kept def clear_model_prediction_history(self, index, generated_tokens_index, from_=-1): """Clears the model prediction history for a specific sample in the batch. First deletes all history of finished tokens, then deletes history of tokens that were prediction, but then got removed because of speculation Args: index (int): index of the sample in the batch generated_tokens_index (list[int]): Generated tokens at the index from_ (int, optional): From which token to delete all the history. Defaults to -1. 
""" all_indices = list(self.model_prediction_history[index].keys()) for token in all_indices: all_none = all([logprob is None for logprob in self.model_prediction_history[index][token].values()]) finished = self.formula.is_finished(self.model_prediction_history[index][token]) if all_none or finished or (from_ != -1 and token > from_): if finished and len(generated_tokens_index) > token and self.calculate_statistics: self.add_monitor_token_probs(generated_tokens_index[token], self.model_prediction_history[index][token], self.logprobs_history[index].get(token)) if finished: for model_index in range(len(self.model_last_token_prediction)): self.model_last_token_prediction[model_index][index] = max(token + 1, self.model_last_token_prediction[model_index][index]) del self.model_prediction_history[index][token] if from_ > -1: for model_index in range(len(self.model_last_token_prediction)): self.model_last_token_prediction[model_index][index] = min(from_ + 1, self.model_last_token_prediction[model_index][index]) def max_index_prediction_history(self, index): """Gets the max index of the model prediction history for a specific runnable operator Args: index (int): index of runnable operator in the list of runnable operators Returns: int: max index of its prediction """ keys = list(self.model_prediction_history[index].keys()) if len(keys) == 0: return 0 return max(self.model_prediction_history[index].keys()) def normal_sample(self, probs): """Samples from a probability distribution Args: probs (torch.tensor): Probability distribution Returns: int: Sampled token """ out = torch.multinomial(probs, 1) return out def KL_divergence(self, p, q): """Compuates KL divergence between two probability distributions Args: p (torch.tensor): probability distribution q (torch.tensor): probability distribution Returns: float: KL divergence """
class ModelArithmetic(PreTrainedModel): """ Main class for prompt arithmetic. Handles the generation of text based on the formula. """ SAVE_FILE = "prompt_arithmetic.json" _supports_sdpa = True def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None): """Initializes the prompt arithmetic model. Args: formula (Operator): The formula for which generations need to be made. default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None. dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16. intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False. epsilon (float, optional): Just some small value. Defaults to 1e-12. retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to []. calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True. needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task. lm_eval_task (str, optional): Name of the lm eval task. Defaults to None. tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None. """ self.formula = formula.clone() self.default_model = default_model self.loaded_models = dict() self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing) self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator self.output_type = namedtuple("ModelArithmeticOutput", ["logits", "logprobs_per_model"]) self.intermediate_argmax = intermediate_argmax self.retroactive_operators = retroactive_operators self.calculate_statistics = calculate_statistics self.runnable_operators = [] for runnable_operator in self.formula.runnable_operators(): if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]): self.runnable_operators.append(runnable_operator) # sort the prompts by speculative factor, putting the one with highest speculative factor first # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones # however, we first need to sort by run_priority and then within that by speculative factor self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True) self.load_all_models(dtype=dtype) if self.default_model not in self.loaded_models: for runnable_operator in self.runnable_operators: if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None: self.default_model = runnable_operator.model break if self.default_model is None: raise ValueError("Default model must be specified if not specified in an llm prompt") self.config = self.loaded_models[str(self.default_model)].config if tokenizer is None: self.tokenizer = 
load_tokenizer(self.default_model) else: self.tokenizer = tokenizer self.init_runnable_operators() self.model_input_tokens = { runnable_operator.id(): TokenizedInput(runnable_operator, runnable_operator.model, self.loaded_models[str(runnable_operator.model)].config, self.tokenizer) for runnable_operator in self.runnable_operators } self.init_monitor() self.epsilon = epsilon self.word_size = len(self.tokenizer) if Compatibility is not None: self.lm_eval_compatibility = Compatibility( task_name=lm_eval_task, needs_input_tokens_lm_eval=needs_input_tokens_lm_eval, tokenizer=self.tokenizer, device=self.device, max_length=get_max_length(self.config), ) else: self.lm_eval_compatibility = None super().__init__(self.config) def init_monitor(self): """ Initializes the monitor for the prompt arithmetic model. """ self.monitor = Monitor(self.runnable_operators) def init_runnable_operators(self): """Initializes the runnable operators. This is done after the models have been loaded, because the models are needed for the runnable operators. """ for runnable_operator in self.runnable_operators: if runnable_operator.model is None: runnable_operator.model = self.default_model runnable_operator.initialize_after_model_set() def load_all_models(self, dtype=torch.bfloat16): """Loads all the models that are needed for the runnable operators. Models are never loaded twice. Args: dtype (torch.dtype, optional): Default Dtype of the models. Defaults to torch.bfloat16. """ if self.default_model is None: for runnable_operator in self.runnable_operators: if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None: self.default_model = str(runnable_operator.model) break for runnable_operator in self.runnable_operators: if runnable_operator.model is None: assert self.default_model is not None, "Default model must be specified if not specified in prompt" runnable_operator.model = self.default_model if runnable_operator.model not in self.loaded_models: model = runnable_operator.load_model(dtype=dtype) model.eval() if model is not None: self.loaded_models[str(runnable_operator.model)] = model if len(self.loaded_models) == 0: assert self.default_model is not None, "Required to at least have one model, for now" self.loaded_models[str(self.default_model)] = load_model(self.default_model, dtype=dtype) @property def device(self): """Device of the default model. Needed for compatibility with lm_eval Returns: torch.device: Device of the default model. """ return self.loaded_models[str(self.default_model)].device def save_pretrained(self, path : str): """Saves the model to the specified path. Args: path (str): Path to which to save the model """ os.makedirs(path, exist_ok=True) all_settings = { "formula": self.formula.generate_settings(), "default_model": self.default_model, } with open(os.path.join(path, self.SAVE_FILE), "w") as f: json.dump(all_settings, f, indent=4, sort_keys=True) @classmethod def from_pretrained(cls, path : str, dtype=torch.bfloat16): """Loads the model from the specified path. Args: path (str): Path from which to load the model dtype (torch.dtype, optional): Default dtype for the models. Defaults to torch.bfloat16. 
Returns: ModelArithmetic: model arithmetic model """ with open(os.path.join(path, cls.SAVE_FILE), "r") as f: all_settings = json.load(f) all_settings["formula"] = Operator.load_from_settings(all_settings["formula"]) return cls(**all_settings, dtype=dtype) def forward_model(self, runnable_operator, continuation_tokens, model_new_tokens=None, use_cache=False, do_speculation=False): """Runs a specifc runnable operator on the continuation tokens. Args: runnable_operator (RunnableOperator): The runnable operator to run. continuation_tokens (list[list[int]]): List of tokens that need to be continued. The prompt is not included in these tokens model_new_tokens (list[int], optional): New tokens for the model. Defaults to None. use_cache (bool, optional): Whether or not to allow the model to use cache (eg key-value storage for an LLM). Defaults to False. do_speculation (bool, optional): Whether or not to do speculation sampling. Defaults to False. Returns: torch.tensor: logprobs of the model, one logprob distribution for each new token in each sample """ start_time = time.time() tokenized_input_creator = self.model_input_tokens[runnable_operator.id()] tokenized_inputs = tokenized_input_creator.add_continuation_tokens(continuation_tokens) tokenized_only_input = tokenized_input_creator.get_only_input_tokens() was_none = model_new_tokens is None if was_none: model_new_tokens = torch.tensor([len(continuation_tokens[i]) + 1 for i in range(len(continuation_tokens))]) if len(self.model_prediction_history) < len(continuation_tokens): new_prediction_history = [dict() for _ in range(len(continuation_tokens))] else: new_prediction_history = [self.model_prediction_history[i].get(self.max_index_prediction_history(i), dict()) for i in range(len(continuation_tokens))] logprobs = runnable_operator.run( loaded_models=self.loaded_models, tokenized_inputs=tokenized_inputs, model_new_tokens=model_new_tokens, new_prediction_history=new_prediction_history, other_tokenizer=self.tokenizer, tokenized_only_input=tokenized_only_input, use_cache=use_cache, do_speculation=do_speculation ) logprobs = [logprob.to(self.device) for logprob in logprobs] if was_none: logprobs = torch.stack(logprobs, dim=0) self.monitor.add_result(element=time.time() - start_time, runnable_operator=runnable_operator) return logprobs def group_complete(self, model_history): """Checks which groups of runnable operators have been completely calculated and which haven't. Args: model_history (dict): Dict mapping the runnable operator id to the logprobs of the model Returns: dict[bool]: Dict mapping the group to whether it has been completely calculated or not """ # everything that is a group needs to be either all calculated or all not calculated group_calculated = dict() groups = set([runnable_operator.group for runnable_operator in self.runnable_operators if runnable_operator.group is not None]) completed_groups = {group: True for group in groups} for runnable_operator in self.runnable_operators: if runnable_operator.group is not None: is_calculated = model_history.get(runnable_operator.id()) is not None if runnable_operator.group not in group_calculated: group_calculated[runnable_operator.group] = is_calculated elif group_calculated[runnable_operator.group] != is_calculated: completed_groups[runnable_operator.group] = False return completed_groups def group_model_history(self, model_history): """Sets the model history on which to evaluate the formula based on the groups. Removes predictions if the group hasn't been completely calculated yet. 
Args: model_history (dict): Dict mapping the runnable operator id to the logprobs of the model Returns: dict: Adjusted dict mapping """ completed_groups = self.group_complete(model_history) grouped_model_history = dict() for runnable_operator in self.runnable_operators: if runnable_operator.group is None or completed_groups[runnable_operator.group]: grouped_model_history[runnable_operator.id()] = model_history[runnable_operator.id()] else: grouped_model_history[runnable_operator.id()] = None return grouped_model_history def create_sample_logprobs(self, logprobs, temperature, top_k, top_p): """Creates the logprobs for each token in each sample. Args: logprobs (torch.tensor): Logprobs of the model temperature (float): temperature to use top_k (int): top_k to use top_p (float): top_p to use Returns: torch.tensor: Logprobs for each token in each sample """ if temperature == 0: logprobs_argmax = torch.argmax(logprobs, dim=-1) logprobs = torch.nn.functional.one_hot(logprobs_argmax, num_classes=logprobs.shape[-1]).float() return logprobs logprobs = logprobs / temperature logprobs = top_k_top_p_filtering(logprobs.unsqueeze(0), top_k=top_k, top_p=top_p) return torch.softmax(logprobs, dim=-1).squeeze() def process_logprobs(self, model_history): """Processes model history to get the probability distribution for the token. Args: model_history (dict): Dict mapping the runnable operator id to the logprobs of the model Returns: _type_: _description_ """ init_time = time.time() logprobs_normalized = self.formula.evaluate(model_history) self.monitor.add_result(element=time.time() - init_time, indicator="formula_evaluation") if not torch.is_tensor(logprobs_normalized): return None # logprobs_normalized = logprobs_normalized / temperature # logprobs_normalized = top_k_top_p_filtering(logprobs_normalized.unsqueeze(0), top_k=top_k, top_p=top_p) return logprobs_normalized def run_retroactive_operators(self, index, tokenized_sentence, temperature, top_k, top_p): """Runs the retroactive operators on the tokenized sentence. Args: index (int): Index of the sentence in the current batch tokenized_sentence (list[int]): Tokenized sentence temperature (float): temperature to use top_k (int): top_k to use top_p (float): top_p to use Returns: list[int]: Adjusted tokenized sentence based on the retroactive operators and whether they accepted it. 
""" for operator in self.retroactive_operators: accepted = operator.accept(tokenized_sentence, self.tokenizer) if accepted < 0: not_accepted_token = tokenized_sentence[accepted] self.clear_model_prediction_history(index, tokenized_sentence, from_=len(tokenized_sentence) + accepted) tokenized_sentence = tokenized_sentence[:len(tokenized_sentence) + accepted] self.logprobs_history[index][len(tokenized_sentence)][not_accepted_token] = -torch.inf if torch.all(self.logprobs_history[index][len(tokenized_sentence)] == -torch.inf): self.logprobs_history[index][len(tokenized_sentence)] = torch.zeros_like(self.logprobs_history[index][len(tokenized_sentence)]) probs_to_sample = self.create_sample_logprobs( self.logprobs_history[index][len(tokenized_sentence)], temperature=temperature, top_k=top_k, top_p=top_p ) new_token = torch.multinomial(probs_to_sample, 1).item() tokenized_sentence.append(new_token) return self.run_retroactive_operators(index, tokenized_sentence, temperature, top_k, top_p) return tokenized_sentence def speculation_sample(self, token, previous_models_probs, new_models_probs): """Sample a token based on the previous and new model probabilities in the speculative sampling way. Also returns whether the token was accepted or not. Args: token (int): Token that is currently selected previous_models_probs (torch.tensor): Model probabilities of the previous models new_models_probs (torch.tensor): Model probabilities of the new models Returns: (int, bool): New token and whether or not the input token was accepted """ acceptance_prob = torch.minimum(torch.tensor(1.0), new_models_probs[token] / (previous_models_probs[token] + torch.tensor(self.epsilon))) # TODO: the next line is taking an enormous amount of time because of asynchronous computing on gpu's and requiring it to be returned immediately # Therefore do batch processing acceptance_prob = float(acceptance_prob) self.monitor.add_result(element=float(acceptance_prob), indicator="acceptance_prob") # self.monitor.add_result(element=self.entropy(previous_models_probs).item(), indicator="entropy_previous") # self.monitor.add_result(element=previous_models_probs[token].item(), indicator="probability_previous") if torch.rand(1) < acceptance_prob: return token, True else: new_proba_distrib = torch.relu(new_models_probs - previous_models_probs) new_proba_distrib /= torch.sum(new_proba_distrib) new_token = torch.multinomial(new_proba_distrib, 1).item() return new_token, False def add_new_result(self, generated_tokens, num_new_tokens, runnable_operator, new_model_logprobs, top_p, top_k, temperature): """Adds a new run of a runnable operator to the model prediction history. Also does speculation sampling if needed. 
Args: generated_tokens (list[list[int]]): Currently generated tokens by the model num_new_tokens (list[int]): Number of new tokens for each sample in the batch runnable_operator (RunnableOperator): Runnable operator that was run new_model_logprobs (List[torch.tensor]): Output of the run function of the runnable operator top_p (flaot): top_p to use top_k (int): top_k to use temperature (float): temperature to use Returns: list[bool]: For each sample in the batch, whether all tokens in that sample were kept or not """ all_kept = [] for i in range(len(generated_tokens)): n_generated_tokens = len(generated_tokens[i]) kept = True for n_token in range(n_generated_tokens - num_new_tokens[i] + 1, n_generated_tokens + 1): # initialize the model prediction history self.model_prediction_history[i][n_token] = self.model_prediction_history[i].get(n_token, {runnable_operator.id(): None for runnable_operator in self.runnable_operators}) # check if we need to do speculation sampling, only needed when a previous token was sampled do_speculation_sample = n_token < n_generated_tokens # speculation sampling not needed if the model was run before if self.model_prediction_history[i][n_token][runnable_operator.id()] is not None: do_speculation_sample = False # speculation sampling not needed if all models have not been run yet: this is the first model on this token if all([logprob is None for logprob in self.model_prediction_history[i][n_token].values()]): do_speculation_sample = False # This means that this token was already fully accepted, so we can just continue (can happen if batch_size > 1 or when end is triggered) if self.max_index_prediction_history(i) > n_token: continue # add the new model logprobs self.model_prediction_history[i][n_token][runnable_operator.id()] = new_model_logprobs[i][-n_generated_tokens + n_token + num_new_tokens[i] - 1] group_model_history = self.group_model_history(self.model_prediction_history[i][n_token]) # group_model_history needs to be separately checked, since it could be that the group is not yet fully calculated # also allow no logprobs runnable operators (would lead to errors) if the formula is not finished yet (if it is finished, you need to) if all([logprob is None for logprob in group_model_history.values()]) or (not runnable_operator.outputs_logprobs and not self.formula.is_finished(group_model_history)): continue # process the logprobs new_model_probs = self.process_logprobs(group_model_history) if self.intermediate_argmax and not self.formula.is_finished(group_model_history): argmax_el = torch.argmax(new_model_probs) new_model_probs = torch.zeros_like(new_model_probs) new_model_probs[argmax_el] = 1.0 if do_speculation_sample: if self.calculate_statistics: self.monitor.add_result(self.expected_acceptance_prob(self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), self.create_sample_logprobs(self.logprobs_history[i].get(n_token), temperature, top_k, top_p)), indicator="expected_acceptance_prob", runnable_operator=runnable_operator) new_token, kept = self.speculation_sample( token = generated_tokens[i][n_token], previous_models_probs=self.create_sample_logprobs(self.logprobs_history[i][n_token], temperature, top_k, top_p), new_models_probs=self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), ) if n_token in self.model_prediction_history[i]: self.logprobs_history[i][n_token] = new_model_probs if not kept: # if not kept, we change the generated tokens and remove the model prediction history after that token 
generated_tokens[i][n_token] = new_token generated_tokens[i] = generated_tokens[i][:n_token + 1] self.clear_model_prediction_history(i, generated_tokens[i], from_=n_token) self.trigger_end[i] = False elif n_token in self.model_prediction_history[i]: self.logprobs_history[i][n_token] = new_model_probs if not kept: break all_kept.append(kept) return all_kept def clear_model_prediction_history(self, index, generated_tokens_index, from_=-1): """Clears the model prediction history for a specific sample in the batch. First deletes all history of finished tokens, then deletes history of tokens that were prediction, but then got removed because of speculation Args: index (int): index of the sample in the batch generated_tokens_index (list[int]): Generated tokens at the index from_ (int, optional): From which token to delete all the history. Defaults to -1. """ all_indices = list(self.model_prediction_history[index].keys()) for token in all_indices: all_none = all([logprob is None for logprob in self.model_prediction_history[index][token].values()]) finished = self.formula.is_finished(self.model_prediction_history[index][token]) if all_none or finished or (from_ != -1 and token > from_): if finished and len(generated_tokens_index) > token and self.calculate_statistics: self.add_monitor_token_probs(generated_tokens_index[token], self.model_prediction_history[index][token], self.logprobs_history[index].get(token)) if finished: for model_index in range(len(self.model_last_token_prediction)): self.model_last_token_prediction[model_index][index] = max(token + 1, self.model_last_token_prediction[model_index][index]) del self.model_prediction_history[index][token] if from_ > -1: for model_index in range(len(self.model_last_token_prediction)): self.model_last_token_prediction[model_index][index] = min(from_ + 1, self.model_last_token_prediction[model_index][index]) def max_index_prediction_history(self, index): """Gets the max index of the model prediction history for a specific runnable operator Args: index (int): index of runnable operator in the list of runnable operators Returns: int: max index of its prediction """ keys = list(self.model_prediction_history[index].keys()) if len(keys) == 0: return 0 return max(self.model_prediction_history[index].keys()) def normal_sample(self, probs): """Samples from a probability distribution Args: probs (torch.tensor): Probability distribution Returns: int: Sampled token """ out = torch.multinomial(probs, 1) return out def KL_divergence(self, p, q): """Compuates KL divergence between two probability distributions Args: p (torch.tensor): probability distribution q (torch.tensor): probability distribution Returns: float: KL divergence """
return torch.sum(p * torch.log((p + self.epsilon) / (q + self.epsilon)))
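The speculation_sample method in the record above accepts a drafted token with probability min(1, q(token) / p(token)) and, on rejection, resamples from the normalized positive part of q - p. Below is a minimal standalone sketch of that accept/resample rule; the function name, the epsilon value, and the toy distributions are illustrative assumptions, not part of the record's code.

import torch

def speculative_accept(token, prev_probs, new_probs, eps=1e-12):
    # Accept the drafted token with probability min(1, q(token) / p(token)),
    # mirroring the acceptance test in speculation_sample above.
    accept_prob = torch.minimum(torch.tensor(1.0), new_probs[token] / (prev_probs[token] + eps))
    if torch.rand(1).item() < float(accept_prob):
        return token, True
    # On rejection, resample from the normalized positive part of (q - p).
    residual = torch.relu(new_probs - prev_probs)
    residual = residual / residual.sum()
    return torch.multinomial(residual, 1).item(), False

# Toy usage (hypothetical distributions over a 4-token vocabulary).
p = torch.tensor([0.1, 0.6, 0.2, 0.1])
q = torch.tensor([0.3, 0.3, 0.3, 0.1])
print(speculative_accept(1, p, q))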
4
2023-11-21 20:01:08+00:00
16k
huang-yh/SelfOcc
model/encoder/tpvformer/tpvformer_encoder.py
[ { "identifier": "BaseEncoder", "path": "model/encoder/base_encoder.py", "snippet": "class BaseEncoder(BaseModule):\n \"\"\"Further encode 3D representations.\n image backbone -> neck -> lifter -> encoder -> segmentor\n \"\"\"\n\n def __init__(self, init_cfg=None, **kwargs):\n super().__init__(init_cfg)\n \n def forward(\n self, \n representation,\n ms_img_feats=None,\n metas=None,\n **kwargs\n ):\n pass" }, { "identifier": "point_sampling", "path": "model/encoder/bevformer/utils.py", "snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef point_sampling(reference_points, img_metas):\n reference_points = reference_points.float()\n\n lidar2img = []\n for img_meta in img_metas:\n lidar2img.append(img_meta['lidar2img'])\n if isinstance(lidar2img[0], (np.ndarray, list)):\n lidar2img = np.asarray(lidar2img)\n lidar2img = reference_points.new_tensor(lidar2img) # (B, N, 4, 4)\n else:\n lidar2img = torch.stack(lidar2img, dim=0)\n\n reference_points = torch.cat(\n (reference_points, torch.ones_like(reference_points[..., :1])), -1)\n\n reference_points = reference_points.permute(1, 0, 2, 3)\n D, B, num_query = reference_points.size()[:3]\n num_cam = lidar2img.size(1)\n\n reference_points = reference_points.view(\n D, B, 1, num_query, 4, 1)\n\n lidar2img = lidar2img.view(\n 1, B, num_cam, 1, 4, 4)\n\n reference_points_cam = torch.matmul(\n lidar2img.to(torch.float32),\n reference_points.to(torch.float32)).squeeze(-1)\n \n eps = 1e-5\n\n # reference_points_cam[..., 0:2] = reference_points_cam[..., 0:2] * \\\n # img_metas[0]['scale_rate']\n \n if 'img_augmentation' in img_metas[0] and \\\n 'post_rots' in img_metas[0]['img_augmentation'] and \\\n 'post_trans' in img_metas[0]['img_augmentation']:\n post_rots = []\n post_trans = []\n for img_meta in img_metas:\n post_rots.append(img_meta['img_augmentation']['post_rots'].numpy())\n post_trans.append(img_meta['img_augmentation']['post_trans'].numpy())\n post_rots = np.asarray(post_rots)\n post_trans = np.asarray(post_trans)\n post_rots = reference_points.new_tensor(post_rots)\n post_trans = reference_points.new_tensor(post_trans)\n\n reference_points_cam[..., :2] = reference_points_cam[..., :2] / torch.maximum(\n reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps)\n \n # D, B, N, Q, 3, 1\n reference_points_cam = reference_points_cam[..., :3].unsqueeze(-1)\n post_rots = post_rots.view(1, B, num_cam, 1, 3, 3)\n reference_points_cam = torch.matmul(\n post_rots.to(torch.float32),\n reference_points_cam.to(torch.float32)).squeeze(-1)\n # D, B, N, Q, 3\n post_trans = post_trans.view(1, B, num_cam, 1, 3)\n reference_points_cam = reference_points_cam + post_trans\n tpv_mask = (reference_points_cam[..., 2:3] > eps) \n reference_points_cam = reference_points_cam[..., :2]\n else:\n tpv_mask = (reference_points_cam[..., 2:3] > eps)\n reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum(\n reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps)\n\n # reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1]\n # reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0]\n\n reference_points_cam[..., 0] /= img_metas[0]['img_shape'][1]\n reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0] # D, B, N, Q, 2\n\n tpv_mask = (tpv_mask & (reference_points_cam[..., 1:2] > 0.0)\n & (reference_points_cam[..., 1:2] < 1.0)\n & (reference_points_cam[..., 0:1] < 1.0)\n & (reference_points_cam[..., 0:1] > 0.0))\n\n tpv_mask = torch.nan_to_num(tpv_mask)\n\n reference_points_cam = 
reference_points_cam.permute(2, 1, 3, 0, 4) # N, B, Q, D, 2\n tpv_mask = tpv_mask.permute(2, 1, 3, 0, 4).squeeze(-1)\n\n if 'focal_ratios_x' in img_metas[0]:\n scales_x = np.asarray(img_metas[0]['focal_ratios_x'])\n scales_x = reference_points.new_tensor(scales_x).view(-1, 1, 1, 1, 1)\n reference_points_cam[..., :1] = reference_points_cam[..., :1] * scales_x\n scales_y = np.asarray(img_metas[0]['focal_ratios_y'])\n scales_y = reference_points.new_tensor(scales_y).view(-1, 1, 1, 1, 1)\n reference_points_cam[..., 1:] = reference_points_cam[..., 1:] * scales_y\n\n return reference_points_cam, tpv_mask" }, { "identifier": "get_cross_view_ref_points", "path": "model/encoder/tpvformer/utils.py", "snippet": "def get_cross_view_ref_points(tpv_h, tpv_w, tpv_z, num_points_in_pillar, offset=0):\n # ref points generating target: (#query)hw+zh+wz, (#level)3, #p, 2\n # generate points for hw and level 1\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n h_ranges = h_ranges.unsqueeze(-1).expand(-1, tpv_w).flatten()\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_h, -1).flatten()\n hw_hw = torch.stack([w_ranges, h_ranges], dim=-1) # hw, 2\n hw_hw = hw_hw.unsqueeze(1).expand(-1, num_points_in_pillar[2], -1) # hw, #p, 2\n # generate points for hw and level 2\n z_ranges = torch.linspace(offset, tpv_z-1+offset, num_points_in_pillar[2]) / tpv_z # #p\n z_ranges = z_ranges.unsqueeze(0).expand(tpv_h*tpv_w, -1) # hw, #p\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(-1, 1, 1).expand(-1, tpv_w, num_points_in_pillar[2]).flatten(0, 1)\n hw_zh = torch.stack([h_ranges, z_ranges], dim=-1) # hw, #p, 2\n # generate points for hw and level 3\n z_ranges = torch.linspace(offset, tpv_z-1+offset, num_points_in_pillar[2]) / tpv_z # #p\n z_ranges = z_ranges.unsqueeze(0).expand(tpv_h*tpv_w, -1) # hw, #p\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(1, -1, 1).expand(tpv_h, -1, num_points_in_pillar[2]).flatten(0, 1)\n hw_wz = torch.stack([z_ranges, w_ranges], dim=-1) # hw, #p, 2\n \n # generate points for zh and level 1\n w_ranges = torch.linspace(offset, tpv_w-1+offset, num_points_in_pillar[1]) / tpv_w\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_z*tpv_h, -1)\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(1, -1, 1).expand(tpv_z, -1, num_points_in_pillar[1]).flatten(0, 1)\n zh_hw = torch.stack([w_ranges, h_ranges], dim=-1)\n # generate points for zh and level 2\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(-1, 1, 1).expand(-1, tpv_h, num_points_in_pillar[1]).flatten(0, 1)\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(1, -1, 1).expand(tpv_z, -1, num_points_in_pillar[1]).flatten(0, 1)\n zh_zh = torch.stack([h_ranges, z_ranges], dim=-1) # zh, #p, 2\n # generate points for zh and level 3\n w_ranges = torch.linspace(offset, tpv_w-1+offset, num_points_in_pillar[1]) / tpv_w\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_z*tpv_h, -1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(-1, 1, 1).expand(-1, tpv_h, num_points_in_pillar[1]).flatten(0, 1)\n zh_wz = torch.stack([z_ranges, w_ranges], dim=-1)\n\n # generate points for wz and level 1\n h_ranges = torch.linspace(offset, tpv_h-1+offset, num_points_in_pillar[0]) / tpv_h\n h_ranges = 
h_ranges.unsqueeze(0).expand(tpv_w*tpv_z, -1)\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(-1, 1, 1).expand(-1, tpv_z, num_points_in_pillar[0]).flatten(0, 1)\n wz_hw = torch.stack([w_ranges, h_ranges], dim=-1)\n # generate points for wz and level 2\n h_ranges = torch.linspace(offset, tpv_h-1+offset, num_points_in_pillar[0]) / tpv_h\n h_ranges = h_ranges.unsqueeze(0).expand(tpv_w*tpv_z, -1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(1, -1, 1).expand(tpv_w, -1, num_points_in_pillar[0]).flatten(0, 1)\n wz_zh = torch.stack([h_ranges, z_ranges], dim=-1)\n # generate points for wz and level 3\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(-1, 1, 1).expand(-1, tpv_z, num_points_in_pillar[0]).flatten(0, 1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(1, -1, 1).expand(tpv_w, -1, num_points_in_pillar[0]).flatten(0, 1)\n wz_wz = torch.stack([z_ranges, w_ranges], dim=-1)\n\n reference_points = torch.cat([\n torch.stack([hw_hw, hw_zh, hw_wz], dim=1),\n torch.stack([zh_hw, zh_zh, zh_wz], dim=1),\n torch.stack([wz_hw, wz_zh, wz_wz], dim=1)\n ], dim=0) # hw+zh+wz, 3, #p, 2\n \n return reference_points" }, { "identifier": "GridMeterMapping", "path": "model/encoder/bevformer/mappings.py", "snippet": "class GridMeterMapping:\n\n def __init__(\n self,\n nonlinear_mode: Literal['linear_upscale', 'linear'] = 'linear_upscale',\n h_size=[128, 32],\n h_range=[51.2, 28.8],\n h_half=False,\n w_size=[128, 32],\n w_range=[51.2, 28.8],\n w_half=False,\n d_size=[20, 10],\n d_range=[-4.0, 4.0, 12.0]\n ) -> None:\n self.nonlinear_mode = nonlinear_mode\n if nonlinear_mode == 'linear_upscale':\n assert all([h == w for h, w in zip(h_size, w_size)])\n assert all([h == w for h, w in zip(h_range, w_range)])\n assert (not h_half) and (not w_half)\n self.mapping = NonLinearMapping(\n h_size[0],\n h_size[1],\n h_range[0],\n h_range[1],\n d_size[0],\n d_size[1],\n d_range)\n self.size_h = self.size_w = self.mapping.bev_size\n self.size_d = self.mapping.z_size\n elif nonlinear_mode == 'linear':\n self.mapping = LinearMapping(\n h_size,\n h_range,\n h_half,\n w_size,\n w_range,\n w_half,\n d_size,\n d_range)\n self.size_h = self.mapping.h_tot_len\n self.size_w = self.mapping.w_tot_len\n self.size_d = self.mapping.d_tot_len\n self.grid2meter = self.mapping.grid2meter\n self.meter2grid = self.mapping.meter2grid" }, { "identifier": "BEVCrossAttention", "path": "model/encoder/bevformer/attention/image_cross_attention.py", "snippet": "class BEVCrossAttention(BaseModule):\r\n \"\"\"\r\n Image cross-attention in TPVFormer. 
Enable every tpv query to interact with its corresponding \r\n area on the image feature plane.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_cams=6,\r\n dropout=0.1,\r\n init_cfg=None,\r\n batch_first=True,\r\n deformable_attention=dict(\r\n type='MSDeformableAttention3D',\r\n embed_dims=256,\r\n num_levels=4),\r\n **kwargs):\r\n super().__init__(init_cfg)\r\n\r\n self.init_cfg = init_cfg\r\n self.dropout = nn.Dropout(dropout)\r\n self.deformable_attention = build_attention(deformable_attention)\r\n self.embed_dims = embed_dims\r\n self.num_cams = num_cams\r\n self.output_proj = nn.Linear(embed_dims, embed_dims)\r\n self.batch_first = batch_first\r\n self.init_weight()\r\n\r\n def init_weight(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n\r\n # @force_fp32(apply_to=('query', 'key', 'value', 'reference_points_cams'))\r\n # @torch.cuda.amp.autocast(enabled=False)\r\n def forward(self,\r\n query,\r\n key,\r\n value,\r\n residual=None,\r\n spatial_shapes=None,\r\n reference_points_cams=None,\r\n bev_masks=None,\r\n level_start_index=None,\r\n **kwargs):\r\n \"\"\"Forward Function of Detr3DCrossAtten.\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n (bs, num_query, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n (bs, num_key, embed_dims).\r\n value (Tensor): The value tensor with shape\r\n (bs, num_key, embed_dims).\r\n residual (Tensor): The tensor used for addition, with the\r\n same shape as `x`. Default None. If None, `x` will be used.\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different level. With shape (num_levels, 2),\r\n last dimension represent (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape (num_levels) and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n if key is None:\r\n key = query\r\n if value is None:\r\n value = key\r\n\r\n if residual is None:\r\n residual = query \r\n bs, num_query, _ = query.size()\r\n\r\n slots = torch.zeros_like(query)\r\n # indexeses = []\r\n # max_lens = []\r\n # queries_rebatches = []\r\n # reference_points_rebatches = []\r\n # for tpv_idx, tpv_mask in enumerate(tpv_masks):\r\n indexes = []\r\n for _, mask_per_img in enumerate(bev_masks):\r\n index_query_per_img = mask_per_img[0].sum(-1).nonzero().squeeze(-1)\r\n indexes.append(index_query_per_img)\r\n max_len = max([len(each) for each in indexes])\r\n # max_lens.append(max_len)\r\n # indexeses.append(indexes)\r\n\r\n reference_points_cam = reference_points_cams\r\n D = reference_points_cam.size(3)\r\n\r\n queries_rebatch = query.new_zeros(\r\n [bs * self.num_cams, max_len, self.embed_dims])\r\n reference_points_rebatch = reference_points_cam.new_zeros(\r\n [bs * self.num_cams, max_len, D, 2])\r\n\r\n for i, reference_points_per_img in enumerate(reference_points_cam):\r\n for j in range(bs):\r\n index_query_per_img = indexes[i]\r\n queries_rebatch[j * self.num_cams + i, :len(index_query_per_img)] = query[j, index_query_per_img]\r\n reference_points_rebatch[j * self.num_cams + i, :len(index_query_per_img)] = reference_points_per_img[j, index_query_per_img]\r\n \r\n # queries_rebatches.append(queries_rebatch)\r\n # reference_points_rebatches.append(reference_points_rebatch)\r\n\r\n num_cams, l, bs, embed_dims = key.shape\r\n\r\n key = key.permute(2, 0, 1, 3).reshape(\r\n self.num_cams * 
bs, l, self.embed_dims)\r\n value = value.permute(2, 0, 1, 3).reshape(\r\n self.num_cams * bs, l, self.embed_dims)\r\n\r\n query = self.deformable_attention(\r\n query=queries_rebatch, key=key, value=value,\r\n reference_points=reference_points_rebatch, \r\n spatial_shapes=spatial_shapes,\r\n level_start_index=level_start_index,)\r\n \r\n # for tpv_idx, indexes in enumerate(indexeses):\r\n for i, index_query_per_img in enumerate(indexes):\r\n for j in range(bs):\r\n slots[j, index_query_per_img] += query[j * self.num_cams + i, :len(index_query_per_img)]\r\n\r\n count = bev_masks.sum(-1) > 0\r\n count = count.permute(1, 2, 0).sum(-1)\r\n count = torch.clamp(count, min=1.0)\r\n slots = slots / count[..., None]\r\n slots = self.output_proj(slots)\r\n\r\n return self.dropout(slots) + residual\r" }, { "identifier": "BEVDeformableAttention", "path": "model/encoder/bevformer/attention/image_cross_attention.py", "snippet": "class BEVDeformableAttention(BaseModule):\r\n \"\"\"An attention module used in Deformable-Detr.\r\n\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 8.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to False.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n value_proj_ratio (float): The expansion ratio of value_proj.\r\n Default: 1.0.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims: int = 256,\r\n num_heads: int = 8,\r\n num_levels: int = 4,\r\n num_points: int = 4,\r\n im2col_step: int = 64,\r\n dropout: float = 0.1,\r\n batch_first: bool = False,\r\n norm_cfg: Optional[dict] = None,\r\n init_cfg: Optional[mmengine.ConfigDict] = None,\r\n value_proj_ratio: float = 1.0):\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.batch_first = batch_first\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims, num_heads * num_levels * num_points * 2)\r\n self.attention_weights = 
nn.Linear(embed_dims,\r\n num_heads * num_levels * num_points)\r\n value_proj_size = int(embed_dims * value_proj_ratio)\r\n self.value_proj = nn.Linear(embed_dims, value_proj_size)\r\n self.init_weights()\r\n\r\n def init_weights(self) -> None:\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n device = next(self.parameters()).device\r\n thetas = torch.arange(\r\n self.num_heads, dtype=torch.float32,\r\n device=device) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n # for i in range(self.num_points):\r\n # grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n @no_type_check\r\n def forward(self,\r\n query: torch.Tensor,\r\n key: Optional[torch.Tensor] = None,\r\n value: Optional[torch.Tensor] = None,\r\n identity: Optional[torch.Tensor] = None,\r\n query_pos: Optional[torch.Tensor] = None,\r\n key_padding_mask: Optional[torch.Tensor] = None,\r\n reference_points: Optional[torch.Tensor] = None,\r\n spatial_shapes: Optional[torch.Tensor] = None,\r\n level_start_index: Optional[torch.Tensor] = None,\r\n **kwargs) -> torch.Tensor:\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n\r\n Args:\r\n query (torch.Tensor): Query of Transformer with shape\r\n (num_query, bs, embed_dims).\r\n key (torch.Tensor): The key tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n value (torch.Tensor): The value tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n identity (torch.Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (torch.Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_padding_mask (torch.Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n reference_points (torch.Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n spatial_shapes (torch.Tensor): Spatial shape of features in\r\n different levels. 
With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (torch.Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n\r\n Returns:\r\n torch.Tensor: forwarded results with shape\r\n [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n value = query\r\n\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n if reference_points.shape[-1] == 2:\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n sampling_locations = reference_points[:, :, None, None, :, :] \\\r\n + sampling_offsets \\\r\n / offset_normalizer[None, None, None, :, None, :]\r\n elif reference_points.shape[-1] == 4:\r\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\r\n + sampling_offsets / self.num_points \\\r\n * reference_points[:, :, None, :, None, 2:] \\\r\n * 0.5\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n if ((IS_CUDA_AVAILABLE and value.is_cuda)\r\n or (IS_MLU_AVAILABLE and value.is_mlu)):\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n\r\n if not self.batch_first:\r\n # (num_query, bs ,embed_dims)\r\n output = output.permute(1, 0, 2)\r\n\r\n return output\r" }, { "identifier": "TPVCrossAttention", "path": "model/encoder/tpvformer/attention/image_cross_attention.py", "snippet": "class TPVCrossAttention(BaseModule):\r\n\r\n def __init__(\r\n self,\r\n embed_dims=256,\r\n num_cams=6,\r\n dropout=0.1, \r\n init_cfg=None,\r\n batch_first=True,\r\n num_heads=16,\r\n num_levels=4,\r\n num_points=[64, 64, 8]):\r\n super().__init__(init_cfg)\r\n\r\n deformable_attn_config_hw = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[2],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_hw = build_attention(deformable_attn_config_hw)\r\n\r\n deformable_attn_config_zh = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n 
dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[1],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_zh = build_attention(deformable_attn_config_zh)\r\n \r\n deformable_attn_config_wz = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[0],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_wz = build_attention(deformable_attn_config_wz)\r\n self.attns = [self.attn_hw, self.attn_zh, self.attn_wz]\r\n\r\n def forward(self,\r\n query,\r\n key,\r\n value,\r\n residual=None,\r\n spatial_shapes=None,\r\n reference_points_cams=None,\r\n tpv_masks=None,\r\n level_start_index=None,\r\n **kwargs):\r\n result = []\r\n\r\n for i in range(3):\r\n out = self.attns[i](\r\n query[i],\r\n key,\r\n value,\r\n residual[i] if residual is not None else None,\r\n spatial_shapes=spatial_shapes,\r\n level_start_index=level_start_index,\r\n reference_points_cams=reference_points_cams[i],\r\n bev_masks=tpv_masks[i])\r\n result.append(out)\r\n\r\n return result\r" }, { "identifier": "CrossViewHybridAttention", "path": "model/encoder/tpvformer/attention/cross_view_hybrid_attention.py", "snippet": "class CrossViewHybridAttention(MultiScaleDeformableAttention):\n\n @no_type_check\n @deprecated_api_warning({'residual': 'identity'},\n cls_name='MultiScaleDeformableAttention')\n def forward(self,\n query: torch.Tensor,\n key: Optional[torch.Tensor] = None,\n value: Optional[torch.Tensor] = None,\n identity: Optional[torch.Tensor] = None,\n query_pos: Optional[torch.Tensor] = None,\n key_padding_mask: Optional[torch.Tensor] = None,\n reference_points: Optional[torch.Tensor] = None,\n spatial_shapes: Optional[torch.Tensor] = None,\n level_start_index: Optional[torch.Tensor] = None,\n **kwargs) -> torch.Tensor:\n \"\"\"Forward Function of MultiScaleDeformAttention.\n\n Args:\n query (torch.Tensor): Query of Transformer with shape\n (num_query, bs, embed_dims).\n key (torch.Tensor): The key tensor with shape\n `(num_key, bs, embed_dims)`.\n value (torch.Tensor): The value tensor with shape\n `(num_key, bs, embed_dims)`.\n identity (torch.Tensor): The tensor used for addition, with the\n same shape as `query`. Default None. If None,\n `query` will be used.\n query_pos (torch.Tensor): The positional encoding for `query`.\n Default: None.\n key_padding_mask (torch.Tensor): ByteTensor for `query`, with\n shape [bs, num_key].\n reference_points (torch.Tensor): The normalized reference\n points with shape (bs, num_query, num_levels, 2),\n all elements is range in [0, 1], top-left (0,0),\n bottom-right (1, 1), including padding area.\n or (N, Length_{query}, num_levels, 4), add\n additional two dimensions is (w, h) to\n form reference boxes.\n spatial_shapes (torch.Tensor): Spatial shape of features in\n different levels. 
With shape (num_levels, 2),\n last dimension represents (h, w).\n level_start_index (torch.Tensor): The start index of each level.\n A tensor has shape ``(num_levels, )`` and can be represented\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n\n Returns:\n torch.Tensor: forwarded results with shape\n [num_query, bs, embed_dims].\n \"\"\"\n\n if value is None:\n value = query\n\n if identity is None:\n identity = query\n if query_pos is not None:\n query = query + query_pos\n if not self.batch_first:\n # change to (bs, num_query ,embed_dims)\n query = query.permute(1, 0, 2)\n value = value.permute(1, 0, 2)\n\n bs, num_query, _ = query.shape\n bs, num_value, _ = value.shape\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\n\n value = self.value_proj(value)\n if key_padding_mask is not None:\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\n value = value.view(bs, num_value, self.num_heads, -1)\n sampling_offsets = self.sampling_offsets(query).view(\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\n attention_weights = self.attention_weights(query).view(\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\n attention_weights = attention_weights.softmax(-1)\n\n attention_weights = attention_weights.view(bs, num_query,\n self.num_heads,\n self.num_levels,\n self.num_points)\n if reference_points.shape[-1] == 2:\n offset_normalizer = torch.stack(\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\n ### changed here\n sampling_locations = reference_points[:, :, None, :, :, :] \\\n + sampling_offsets \\\n / offset_normalizer[None, None, None, :, None, :]\n elif reference_points.shape[-1] == 4:\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\n + sampling_offsets / self.num_points \\\n * reference_points[:, :, None, :, None, 2:] \\\n * 0.5\n else:\n raise ValueError(\n f'Last dim of reference_points must be'\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\n if ((IS_CUDA_AVAILABLE and value.is_cuda)\n or (IS_MLU_AVAILABLE and value.is_mlu)):\n output = MultiScaleDeformableAttnFunction.apply(\n value, spatial_shapes, level_start_index, sampling_locations,\n attention_weights, self.im2col_step)\n else:\n output = multi_scale_deformable_attn_pytorch(\n value, spatial_shapes, sampling_locations, attention_weights)\n\n output = self.output_proj(output)\n\n if not self.batch_first:\n # (num_query, bs ,embed_dims)\n output = output.permute(1, 0, 2)\n\n return self.dropout(output) + identity" }, { "identifier": "CameraAwareSE", "path": "model/encoder/tpvformer/modules/camera_se_net.py", "snippet": "class CameraAwareSE(nn.Module):\n\n def __init__(\n self,\n in_channels=96,\n mid_channels=192,\n out_channles=96):\n super().__init__()\n self.in_channels = in_channels\n self.mid_channels = mid_channels\n self.out_channels = out_channles\n self._init_layers()\n\n def _init_layers(self):\n self.bn = nn.BatchNorm1d(16)\n self.context_mlp = Mlp(16, self.mid_channels, self.mid_channels)\n self.context_se = SELayer(self.mid_channels) # NOTE: add camera-aware\n self.context_conv = nn.Conv2d(self.mid_channels,\n self.out_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n \n if self.in_channels == self.mid_channels:\n self.reduce_conv = nn.Identity()\n else:\n self.reduce_conv = nn.Sequential(\n nn.Conv2d(self.in_channels,\n self.mid_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(self.mid_channels),\n nn.ReLU(inplace=True))\n \n def init_weight(self):\n # 
nn.init.zeros_(self.context_se.conv_expand.weight)\n # nn.init.constant_(self.context_se.conv_expand.bias, 10.0)\n nn.init.zeros_(self.context_mlp.fc2.weight)\n nn.init.constant_(self.context_mlp.fc2.bias, 10.0)\n\n def forward(self, ms_img_feats, metas):\n intrins, sensor2ego = [], []\n for meta in metas:\n intrins.append(meta['intrinsic'])\n sensor2ego.append(meta['cam2ego'])\n intrins = np.asarray(intrins)\n intrins = ms_img_feats[0].new_tensor(intrins) # bs, N, 4, 4\n sensor2ego = np.asarray(sensor2ego)\n sensor2ego = ms_img_feats[0].new_tensor(sensor2ego)[..., :3, :]\n\n batch_size = intrins.shape[0]\n num_cams = intrins.shape[1]\n mlp_input = torch.cat(\n [\n torch.stack(\n [\n intrins[..., 0, 0],\n intrins[..., 1, 1],\n intrins[..., 0, 2],\n intrins[..., 1, 2],\n ],\n dim=-1,\n ),\n sensor2ego.view(batch_size, num_cams, -1),\n ],\n -1,\n ) # bs, N, 16\n mlp_input = self.bn(mlp_input.reshape(-1, mlp_input.shape[-1]))\n context_se = self.context_mlp(mlp_input)[..., None, None] # bs*N, c, 1, 1\n context_se = torch.sigmoid(context_se)\n\n outputs = []\n for i_scale, img_feats in enumerate(ms_img_feats):\n img_feats = self.reduce_conv(img_feats.flatten(0, 1)) # bs*N, c, h, w\n img_feats = self.context_se(img_feats, context_se)\n img_feats = self.context_conv(img_feats)\n outputs.append(img_feats.unflatten(0, (batch_size, num_cams)))\n\n return outputs" } ]
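The point_sampling snippet in the context list above projects 3D reference points into each camera with a homogeneous lidar2img matrix, a perspective divide guarded by eps, and an in-image validity mask. The following single-camera sketch walks through the same steps; the toy intrinsics and random points are made up for illustration and are not taken from the record.

import torch

def project_points(points_xyz, lidar2img, img_h, img_w, eps=1e-5):
    # points_xyz: (N, 3) in the ego/lidar frame; lidar2img: (4, 4) projection matrix.
    ones = torch.ones_like(points_xyz[..., :1])
    pts_hom = torch.cat([points_xyz, ones], dim=-1)          # (N, 4) homogeneous coordinates
    cam = (lidar2img @ pts_hom.T).T                          # (N, 4) camera-frame points
    valid = cam[..., 2:3] > eps                              # keep points in front of the camera
    uv = cam[..., :2] / torch.clamp(cam[..., 2:3], min=eps)  # perspective divide
    uv[..., 0] /= img_w                                      # normalize to [0, 1]
    uv[..., 1] /= img_h
    valid = valid & (uv > 0.0).all(-1, keepdim=True) & (uv < 1.0).all(-1, keepdim=True)
    return uv, valid.squeeze(-1)

# Hypothetical usage: toy pinhole intrinsics embedded in a 4x4 matrix, random points.
pts = torch.rand(8, 3) * 10
P = torch.eye(4)
P[0, 0] = P[1, 1] = 500.0
P[0, 2], P[1, 2] = 320.0, 240.0
uv, mask = project_points(pts, P, img_h=480, img_w=640)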
from mmseg.registry import MODELS
from mmcv.cnn.bricks.transformer import build_positional_encoding, build_transformer_layer
from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention
from mmengine.model import ModuleList
from torch.nn.init import normal_
from mmengine.logging import MMLogger
from ..base_encoder import BaseEncoder
from ..bevformer.utils import point_sampling
from .utils import get_cross_view_ref_points
from ..bevformer.mappings import GridMeterMapping
from ..bevformer.attention import BEVCrossAttention, BEVDeformableAttention
from .attention import TPVCrossAttention, CrossViewHybridAttention
from .modules import CameraAwareSE
import torch.nn as nn, torch, copy

12100
torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False) cross_view_ref_points = get_cross_view_ref_points(size_h, size_w, size_d, num_points_self) self.register_buffer('cross_view_ref_points', cross_view_ref_points, False) # hw_grid_normed = hw_grid[..., [0, 1]].clone() # hw_grid_normed[..., 0] = hw_grid_normed[..., 0] / (size_h - 1) # hw_grid_normed[..., 1] = hw_grid_normed[..., 1] / (size_w - 1) # zh_grid_normed = zh_grid[..., [2, 0]].clone() # zh_grid_normed[..., 0] = zh_grid_normed[..., 0] / (size_d - 1) # zh_grid_normed[..., 1] = zh_grid_normed[..., 1] / (size_h - 1) # wz_grid_normed = wz_grid[..., [1, 2]].clone() # wz_grid_normed[..., 0] = wz_grid_normed[..., 0] / (size_w - 1) # wz_grid_normed[..., 1] = wz_grid_normed[..., 1] / 
(size_d - 1) # self.register_buffer('ref_2d_hw', hw_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_zh', zh_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_wz', wz_grid_normed, False) # H, W, 2 def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules(): if isinstance(m, BEVCrossAttention) or \ isinstance(m, MultiScaleDeformableAttention) or \ isinstance(m, BEVDeformableAttention) or \ isinstance(m, TPVCrossAttention) or \ isinstance(m, CrossViewHybridAttention): try: m.init_weight() except AttributeError: m.init_weights() normal_(self.level_embeds) normal_(self.cams_embeds) def forward_layers( self, tpv_query, # b, c, h, w key, value, tpv_pos=None, # b, h, w, c spatial_shapes=None, level_start_index=None, img_metas=None, **kwargs ): bs = tpv_query[0].shape[0] reference_points_cams, tpv_masks = [], [] for ref_3d in [self.ref_3d_hw, self.ref_3d_zh, self.ref_3d_wz]:
logger = MMLogger.get_instance('selfocc') @MODELS.register_module() class TPVFormerEncoder(BaseEncoder): def __init__( self, mapping_args: dict, # bev_inner=128, # bev_outer=32, # range_inner=51.2, # range_outer=51.2, # nonlinear_mode='linear_upscale', # z_inner=20, # z_outer=10, # z_ranges=[-5.0, 3.0, 11.0], embed_dims=128, num_cams=6, num_feature_levels=4, positional_encoding=None, num_points_cross=[64, 64, 8], num_points_self=[16, 16, 16], transformerlayers=None, num_layers=None, camera_aware=False, camera_aware_mid_channels=None, init_cfg=None): super().__init__(init_cfg) # self.bev_inner = bev_inner # self.bev_outer = bev_outer # self.range_inner = range_inner # self.range_outer = range_outer # assert nonlinear_mode == 'linear_upscale' # TODO # self.nonlinear_mode = nonlinear_mode # self.z_inner = z_inner # self.z_outer = z_outer # self.z_ranges = z_ranges self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 
3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False) cross_view_ref_points = get_cross_view_ref_points(size_h, size_w, size_d, num_points_self) self.register_buffer('cross_view_ref_points', cross_view_ref_points, False) # hw_grid_normed = hw_grid[..., [0, 1]].clone() # hw_grid_normed[..., 0] = hw_grid_normed[..., 0] / (size_h - 1) # hw_grid_normed[..., 1] = hw_grid_normed[..., 1] / (size_w - 1) # zh_grid_normed = zh_grid[..., [2, 0]].clone() # zh_grid_normed[..., 0] = zh_grid_normed[..., 0] / (size_d - 1) # zh_grid_normed[..., 1] = zh_grid_normed[..., 1] / (size_h - 1) # wz_grid_normed = wz_grid[..., [1, 2]].clone() # wz_grid_normed[..., 0] = wz_grid_normed[..., 0] / (size_w - 1) # wz_grid_normed[..., 1] = wz_grid_normed[..., 1] / (size_d - 1) # self.register_buffer('ref_2d_hw', hw_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_zh', zh_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_wz', wz_grid_normed, False) # H, W, 2 def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules(): if isinstance(m, BEVCrossAttention) or \ isinstance(m, MultiScaleDeformableAttention) or \ isinstance(m, BEVDeformableAttention) or \ isinstance(m, TPVCrossAttention) or \ isinstance(m, CrossViewHybridAttention): try: m.init_weight() except AttributeError: m.init_weights() normal_(self.level_embeds) normal_(self.cams_embeds) def forward_layers( self, tpv_query, # b, c, h, w key, value, tpv_pos=None, # b, h, w, c spatial_shapes=None, level_start_index=None, img_metas=None, **kwargs ): bs = tpv_query[0].shape[0] reference_points_cams, tpv_masks = [], [] for ref_3d in [self.ref_3d_hw, self.ref_3d_zh, self.ref_3d_wz]:
reference_points_cam, tpv_mask = point_sampling(
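The gold next line calls point_sampling to obtain normalized per-camera reference points; inside the deformable-attention modules listed in the context, such normalized points are combined with predicted offsets to form sampling locations. Below is a shape-only sketch of that formula using the standard MSDeformableAttention layout (the BEV/TPV variants above broadcast over an extra per-pillar point dimension); every shape and tensor here is a made-up example.

import torch

# Convention from the snippets above: bs, num_query, num_heads, num_levels, num_points, 2
bs, nq, nh, nl, npts = 2, 5, 8, 4, 4
reference_points = torch.rand(bs, nq, nl, 2)            # normalized to [0, 1]
sampling_offsets = torch.randn(bs, nq, nh, nl, npts, 2)  # produced by a linear layer in practice
spatial_shapes = torch.tensor([[64, 176], [32, 88], [16, 44], [8, 22]])
# Offsets are expressed in pixels per level, so normalize by (w, h) of each level.
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
    + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
print(sampling_locations.shape)  # torch.Size([2, 5, 8, 4, 4, 2])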
1
2023-11-20 12:49:14+00:00
16k
MobileTeleSystems/CoolGraph
cool_graph/runners.py
[ { "identifier": "RawDataProcessor", "path": "cool_graph/data/data_processor.py", "snippet": "class RawDataProcessor:\n \"\"\"\n Preprocessing datasets.\n\n Args:\n groups_names (Dict[int, str]): Name of groups in nodes.\n group_names_node_features (Dict[str, List[str]]): Name of features in groups in nodes.\n mon_nodes_path (str): path to nodes\n mon_edges_path (str): path to edges\n mon_labels_path (str): path to labels\n edge_index_cols (List[str]): columns of edge index in dataset\n label_index_col (str): columns of label index in dataset\n label_mask_col (str): mask of label columns\n read_edge_attr (bool): is set True - read edge features. Default to True.\n group_mask_col (str): Mask for group in data. Default to None.\n features_edges_names (List[str]): List of features on edge. Default to None.\n label_cols (List[str]): List of label columns. Default to None.\n target_names (List[str]): List of target names. Default to None.\n \"\"\"\n\n @staticmethod\n def _check_cols_in_parquet(columns: List[str], path: str) -> bool:\n \"\"\"Cheking colomns in parquet files.\n\n Args:\n columns (List[str]): columns of dataset\n path (str): path to dataset\n\n Raises:\n ValueError: if there is no any files with parquet extension\n ValueError: if there is no path with parquet extension\n\n Returns:\n bool: True if columns and path are right\n \"\"\"\n if columns:\n set_cols = set(columns if type(columns) == list else [columns])\n try:\n parquet_file = [path] if path.endswith(\".parquet\") else []\n parquet_file = (\n parquet_file\n + glob.glob(os.path.join(path, \"*.parquet\"), recursive=True)\n + glob.glob(os.path.join(path, \"**/*.parquet\"), recursive=True)\n )\n parquet_file = parquet_file[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Couldn't find any files with parquet extension in {path}\\n\n Original exception: \\n\n {str(ex)}\n \"\"\"\n )\n pqt_cols = set(pq.read_schema(parquet_file).names)\n if not set_cols.issubset(pqt_cols):\n diff = set_cols - pqt_cols\n raise ValueError(\n f\"\"\"\n \"{'\", \"'.join(diff)}\" were not found in {path}\n \"\"\"\n )\n return True\n\n def __init__(\n self,\n groups_names: Dict[int, str],\n group_names_node_features: Dict[str, List[str]],\n mon_nodes_path: str,\n mon_edges_path: str,\n mon_labels_path: str,\n edge_index_cols: List[str],\n label_index_col: str,\n label_mask_col: Optional[str] = None,\n read_edge_attr: bool = True,\n group_mask_col: Optional[str] = None,\n features_edges_names: Optional[List[str]] = None,\n label_cols: Optional[List[str]] = None,\n target_names: Optional[List[str]] = None,\n ) -> None:\n self._check_cols_in_parquet(group_mask_col, mon_nodes_path)\n self._check_cols_in_parquet(label_cols, mon_labels_path)\n self._check_cols_in_parquet([label_mask_col], mon_labels_path)\n self._check_cols_in_parquet([label_index_col], mon_labels_path)\n\n for key, val in group_names_node_features.items():\n try:\n self._check_cols_in_parquet(val, mon_nodes_path)\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n {str(ex)} for group {key} aka {groups_names[key]}\n \"\"\"\n )\n\n df_node_feats = pq.read_table(mon_nodes_path).to_pandas()\n df_labels = pq.read_table(mon_labels_path, columns=label_cols).to_pandas()\n df_edge_index = pq.read_table(\n mon_edges_path, columns=edge_index_cols\n ).to_pandas()\n\n # Nodes\n node_features = torch.FloatTensor(df_node_feats.values)\n group_mask = torch.IntTensor(df_node_feats[group_mask_col].values)\n node_features_names_fixed = df_node_feats.columns.tolist()\n\n # Labels\n 
df_labels.set_index(label_index_col, inplace=True)\n df_labels.sort_index(inplace=True)\n df_labels.reset_index(inplace=True)\n targets = {t: torch.LongTensor(df_labels[t].values) for t in target_names}\n label_mask = torch.BoolTensor(df_labels[label_mask_col].values)\n index = torch.LongTensor(df_labels[label_index_col].values)\n\n try:\n df_node_feats.shape[0] == df_labels.shape[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Length of features must be equal to the length of labels.\n \"\"\"\n )\n\n # Edges\n edge_index = torch.LongTensor(df_edge_index.values).T\n\n # Nodes\n self.node_features = node_features\n self.group_mask = group_mask\n self.targets = targets\n self.label_mask = label_mask\n self.index = index\n self.edge_index = edge_index\n\n # Edge features\n if read_edge_attr:\n df_edge_feats = pq.read_table(\n mon_edges_path, columns=features_edges_names\n ).to_pandas()\n\n self.edge_features = torch.FloatTensor(df_edge_feats.values)\n self.edge_features_names = df_edge_feats.columns.tolist()\n else:\n self.edge_features = None\n self.edge_features_names = None\n\n self.read_edge_attr = read_edge_attr\n\n # Mappings\n inverse = {v: k for k, v in groups_names.items()}\n self.group_indices_node_findex = {\n inverse[key]: [node_features_names_fixed.index(f) for f in value]\n for key, value in group_names_node_features.items()\n }\n self.groups_names = groups_names\n\n def sample_data(\n self, num_neighbors: int, batch_size: int, seed: int = 0\n ) -> Dict[str, List[torch.utils.data.DataLoader]]:\n \"\"\"Samling data.\n\n Args:\n num_neighbors (int): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n seed (int, optional): Number of seed of samples. Defaults to 0.\n\n Returns:\n Dict[str, List[torch.utils.data.DataLoader]]: Sampled data.\n \"\"\"\n\n return create_loaders(\n self.node_features,\n self.edge_features,\n self.edge_index,\n self.read_edge_attr,\n num_neighbors,\n batch_size,\n self.group_mask,\n self.group_indices_node_findex,\n self.groups_names,\n self.label_mask,\n self.index,\n targets=self.targets,\n )" }, { "identifier": "get_auto_batch_size", "path": "cool_graph/data/batch.py", "snippet": "def get_auto_batch_size(\n groups_num_features: List[int],\n conv_type: Optional[Literal[\"NNConv\", \"GraphConv\"]] = None,\n conv1_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv2_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv3_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n n_hops: Optional[int] = None,\n lin_prep_size_common: Optional[int] = None,\n lin_prep_sizes: Optional[List[int]] = None,\n edge_attr_repr_sizes: Optional[List[int]] = None,\n num_edge_features: Optional[int] = None,\n device: str = \"cuda:0\",\n num_neighbors: Optional[List[int]] = None,\n) -> int:\n \"\"\"\n Аutomatic batch size calculation.\n Depending on model size and free GPU memory.\n\n Args:\n groups_num_features (List[int]): Number of feats in groups on nodes.\n conv_type (Literal[NNConv, GraphConv]): Model type\n conv1_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 1. Defaults to None.\n conv2_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 2. Defaults to None.\n conv3_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 3. 
Defaults to None.\n n_hops (int): Hop with neighbors. Defaults to None.\n lin_prep_size_common (int): Size of linear layer (in). Defaults to None.\n lin_prep_sizes (int): Size of linear layer (out). Defaults to None.\n edge_attr_repr_sizes (List[int]): Size of layer of edges attributes. Defaults to None.\n num_edge_features (int): Number of feats on edges. Defaults to None.\n device (str): The current GPU memory usage. Defaults to \"cuda:0\".\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration. Defaults to None.\n\n Returns:\n batch_size (int): Numbers of samples per batch to load.\n \"\"\"\n if lin_prep_sizes is None:\n lin_prep_sizes = []\n if device is None:\n device = \"cuda:0\"\n\n hop1_size = sum(conv1_aggrs.values())\n hop2_size = sum(conv2_aggrs.values()) if n_hops >= 2 else 0\n hop3_size = sum(conv3_aggrs.values()) if n_hops == 3 else 0\n\n max_size_node = max(\n *groups_num_features,\n lin_prep_size_common,\n *lin_prep_sizes,\n hop1_size,\n hop2_size,\n hop3_size,\n )\n\n max_size_edge = 0\n if conv_type == \"NNConv\":\n max_size_edge = max(\n *edge_attr_repr_sizes,\n num_edge_features,\n )\n\n max_size = max_size_node + max_size_edge * 1.5\n\n try:\n all([n != -1 for n in num_neighbors])\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Found -1, Need to know max neighbors per hop.\n \"\"\"\n )\n m_neighbors = np.prod(num_neighbors)\n\n free_memory = torch.cuda.mem_get_info(device=device)[0] / (1024**3) # GB\n\n floats_per_node_ = 320000\n batch_size_ = 250\n memory_reserved_max_ = 3.8\n\n batch_size = (\n 0.5\n * batch_size_\n * floats_per_node_\n / (m_neighbors * max_size)\n * (free_memory / memory_reserved_max_)\n )\n\n if conv_type == \"NNConv\":\n batch_size /= edge_attr_repr_sizes[-1] * 4\n\n batch_size = int(batch_size)\n\n return batch_size" }, { "identifier": "create_loaders", "path": "cool_graph/data/loaders.py", "snippet": "def create_loaders(\n data: Data = None,\n node_features: torch.FloatTensor = None,\n edge_features: torch.FloatTensor = None,\n edge_index: torch.LongTensor = None,\n read_edge_attr: bool = None,\n num_neighbors: List[int] = None,\n batch_size: int = None,\n group_mask: torch.LongTensor = None,\n groups_features: Dict[int, List[int]] = None,\n groups_names: Dict[int, str] = None,\n label_mask: torch.BoolTensor = None,\n index: torch.LongTensor = None,\n targets: Dict[str, torch.Tensor] = None,\n input_nodes: Optional[List] = None,\n node_feature_indices: Optional[List] = None,\n unique_groups: Optional[int] = None,\n) -> List[torch.utils.data.DataLoader]:\n \"\"\"\n Creating list loaders.\n\n Args:\n node_features (torch.FloatTensor): features on nodes on FloatTensor\n edge_features (torch.FloatTensor): features on edge on FloatTensor\n edge_index (torch.LongTensor): edge indices\n read_edge_attr (bool): if set True - read edge features.\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n group_mask (torch.LongTensor): Mask for groups in nodes.\n groups_features (Dict[int, List[int]]): Features in groups in nodes.\n groups_names (Dict[int, str]): Name of featutes in groups in nodes.\n label_mask (torch.BoolTensor): Mask for label.\n index (torch.LongTensor): index\n targets (Dict[str, torch.Tensor]): Labels.\n\n Returns:\n List[torch.utils.data.DataLoader]: Created DataLoader object. 
https://pytorch.org/docs/stable/data.html\n \"\"\"\n unique_groups = np.unique(group_mask)\n try:\n set(unique_groups).issubset(set(groups_features.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Group mask values should be a subset of feature groups keys\"\"\"\n )\n\n try:\n set(groups_features).issubset(set(groups_names.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Feature groups keys should be a subset of feature_groups_names\"\"\"\n )\n if data is None:\n data = Data(\n x=node_features,\n edge_index=edge_index,\n edge_attr=edge_features if read_edge_attr else None,\n group_mask=group_mask,\n label_mask=label_mask,\n index=index,\n **targets,\n )\n input_nodes = torch.nonzero(label_mask)[:, 0]\n\n loader = NeighborLoader(\n data,\n num_neighbors=num_neighbors,\n batch_size=batch_size,\n shuffle=True,\n input_nodes=input_nodes,\n )\n\n list_loader = []\n for sampled_data in tqdm(loader, desc=\"Sample data\"):\n sampled_data.label_mask[sampled_data.batch_size :] = False\n\n for group in unique_groups:\n name = groups_names[group]\n mask = sampled_data.group_mask == group\n features = groups_features[group]\n setattr(sampled_data, name, sampled_data.x[mask][:, features])\n\n del sampled_data.x\n\n list_loader.append(sampled_data)\n\n return list_loader" }, { "identifier": "setup_mlflow_from_config", "path": "cool_graph/logging/mlflow_logging.py", "snippet": "def setup_mlflow_from_config(config: Dict) -> None:\n \"\"\"\n Setup mlflow using logging.mlflow section of a config\n \"\"\"\n\n if config.get(\"MLFLOW_DISABLE_INSECURE_REQUEST_WARNING\", False):\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n for key, value in config.items():\n os.environ[key] = str(value)\n\n mlflow.set_tracking_uri(config.get(\"MLFLOW_TRACKING_URI\"))" }, { "identifier": "model_params_to_trial_params", "path": "cool_graph/parameter_search/example_objective.py", "snippet": "def model_params_to_trial_params(\n **model_params: Dict[str, Union[Literal[str], int, float, List, Dict]]\n) -> Dict[str, Union[Literal[str], int, float, List, Dict]]:\n \"\"\"\n Convert readable model_params to trial_params\n for example to run study.enqueue_trial(trial_params)\n \"\"\"\n trial = {}\n trial[\"activation\"] = model_params[\"activation\"]\n trial[\"lin_prep_len\"] = model_params[\"lin_prep_len\"]\n trial[\"lin_prep_dropout_rate\"] = model_params[\"lin_prep_dropout_rate\"]\n trial[\"lin_prep_weight_norm_flag\"] = model_params[\"lin_prep_weight_norm_flag\"]\n last_size = model_params[\"lin_prep_size_common\"]\n trial[\"lin_prep_size_common\"] = last_size\n for i in range(model_params[\"lin_prep_len\"]):\n trial[f\"lin_prep_size{i}_fraction\"] = np.clip(\n model_params[\"lin_prep_sizes\"][i] / last_size, 0.2, 1.0\n )\n last_size = model_params[\"lin_prep_sizes\"][i]\n\n trial[\"conv1_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"mean\"] / last_size, 0.1, 1.0\n )\n trial[\"conv1_aggrs_max_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"max\"] / last_size, 0.05, 0.7\n )\n trial[\"conv1_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"add\"] / last_size, 0.05, 0.7\n )\n\n trial[\"conv1_dropout_rate\"] = model_params[\"conv1_dropout_rate\"]\n\n if model_params[\"n_hops\"] == 2:\n last_size = sum(model_params[\"conv1_aggrs\"].values())\n\n trial[\"conv2_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"mean\"] / last_size, 0.1, 0.7\n )\n trial[\"conv2_aggrs_max_fraction\"] = np.clip(\n 
model_params[\"conv2_aggrs\"][\"max\"] / last_size, 0.05, 0.5\n )\n trial[\"conv2_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"add\"] / last_size, 0.05, 0.5\n )\n\n trial[\"conv2_dropout_rate\"] = model_params[\"conv2_dropout_rate\"]\n\n if model_params[\"conv_type\"] == \"GraphConv\":\n trial[\"graph_conv_weight_norm_flag\"] = model_params[\n \"graph_conv_weight_norm_flag\"\n ]\n\n if model_params[\"conv_type\"] == \"NNConv\":\n trial[\"edge_attr_repr_len\"] = model_params[\"edge_attr_repr_len\"]\n for i in range(model_params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n trial[f\"edge_attr_repr_size{i}\"] = model_params[\"edge_attr_repr_sizes\"][\n i\n ]\n\n else:\n trial[f\"edge_attr_repr_size{i}_fraction\"] = np.clip(\n model_params[\"edge_attr_repr_sizes\"][i]\n / model_params[\"edge_attr_repr_sizes\"][i - 1],\n 0.2,\n 1.0,\n )\n\n trial[\"edge_attr_repr_size_last\"] = model_params[\"edge_attr_repr_sizes\"][-1]\n\n trial[\"edge_attr_repr_dropout_rate\"] = model_params[\n \"edge_attr_repr_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_last_dropout_rate_zero\"] = (\n model_params[\"edge_attr_repr_last_dropout_rate\"] == 0\n )\n if not trial[\"edge_attr_repr_last_dropout_rate_zero\"]:\n trial[\"edge_attr_repr_last_dropout_rate\"] = model_params[\n \"edge_attr_repr_last_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_weight_norm_flag\"] = model_params[\n \"edge_attr_repr_weight_norm_flag\"\n ]\n\n return trial" }, { "identifier": "sample_model_params", "path": "cool_graph/parameter_search/example_objective.py", "snippet": "def sample_model_params(trial: optuna.Trial, conv_type: str = \"GraphConv\") -> Dict:\n params = {}\n params[\"conv_type\"] = conv_type\n params[\"activation\"] = trial.suggest_categorical(\n \"activation\",\n [\n \"relu\", # 1st place\n \"prelu\", # 2nd place\n \"leakyrelu\",\n \"elu\",\n \"gelu\",\n ],\n )\n # NODE FEATURES PREP params\n params[\"lin_prep_len\"] = trial.suggest_int(\"lin_prep_len\", low=0, high=2)\n params[\"lin_prep_dropout_rate\"] = trial.suggest_uniform(\n \"lin_prep_dropout_rate\", low=0, high=0.5\n )\n params[\"lin_prep_weight_norm_flag\"] = trial.suggest_categorical(\n \"lin_prep_weight_norm_flag\", [False, True]\n )\n\n min_lin_prep_size_common = 32\n max_lin_prep_size_common = 1024\n\n last_size = trial.suggest_int(\n \"lin_prep_size_common\",\n min_lin_prep_size_common,\n max_lin_prep_size_common,\n log=True,\n )\n params[\"lin_prep_size_common\"] = last_size\n params[\"lin_prep_sizes\"] = []\n for i in range(params[\"lin_prep_len\"]):\n fraction = trial.suggest_loguniform(\n f\"lin_prep_size{i}_fraction\", low=0.2, high=1.0\n )\n last_size = max(16, int(np.round(last_size * fraction)))\n params[\"lin_prep_sizes\"].append(last_size)\n params[\"n_hops\"] = 2\n\n # CONV1 params\n\n params[\"conv1_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\"conv1_aggrs_mean_fraction\", low=0.1, high=1.0)\n params[\"conv1_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_max_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_add_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv1_dropout_rate\"] = trial.suggest_uniform(\n \"conv1_dropout_rate\", low=0, high=0.5\n )\n\n # return params\n # CONV2 params\n if params[\"n_hops\"] == 2:\n last_size = sum(params[\"conv1_aggrs\"].values())\n 
params[\"conv2_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_mean_fraction\", low=0.1, high=0.7\n )\n params[\"conv2_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_max_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_add_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv2_dropout_rate\"] = trial.suggest_uniform(\n \"conv2_dropout_rate\", low=0, high=0.5\n )\n if params[\"conv_type\"] == \"GraphConv\":\n params[\"graph_conv_weight_norm_flag\"] = trial.suggest_categorical(\n \"graph_conv_weight_norm_flag\", [False, True]\n )\n\n # EDGE ATTR params\n if params[\"conv_type\"] == \"NNConv\":\n params[\"edge_attr_repr_len\"] = trial.suggest_int(\n \"edge_attr_repr_len\", low=1, high=3\n )\n params[\"edge_attr_repr_sizes\"] = []\n for i in range(params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\n f\"edge_attr_repr_size{i}\", low=4, high=40, log=True\n )\n )\n else:\n fraction = trial.suggest_loguniform(\n f\"edge_attr_repr_size{i}_fraction\", low=0.2, high=1.0\n )\n params[\"edge_attr_repr_sizes\"].append(\n max(4, int(np.round(params[\"edge_attr_repr_sizes\"][-1] * fraction)))\n )\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\"edge_attr_repr_size_last\", low=1, high=5, log=True)\n )\n\n params[\"edge_attr_repr_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_dropout_rate\", low=0, high=0.5\n )\n if trial.suggest_categorical(\n \"edge_attr_repr_last_dropout_rate_zero\", [True, False]\n ):\n params[\"edge_attr_repr_last_dropout_rate\"] = 0.0\n else:\n params[\"edge_attr_repr_last_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_last_dropout_rate\", low=0, high=0.5\n )\n\n params[\"edge_attr_repr_weight_norm_flag\"] = trial.suggest_categorical(\n \"edge_attr_repr_weight_norm_flag\", [False, True]\n )\n\n params[\"edge_attr_repr_last_activation\"] = \"sigmoid\"\n\n return params" }, { "identifier": "Trainer", "path": "cool_graph/train/trainer.py", "snippet": "class Trainer(object):\n def __init__(\n self,\n list_loader_train: List[torch.utils.data.DataLoader],\n list_loader_test: List[torch.utils.data.DataLoader],\n checkpoint_dir: Union[str, pathlib.PosixPath],\n device: str = \"cuda:0\",\n eval_freq: int = 5,\n fill_value: Union[int, float] = -100,\n initial_lr: float = 0.0023,\n weight_decay: float = 0.001,\n loss_name: str = \"CrossEntropyLoss\",\n loss_label_smoothing: bool = False,\n loss_target_weights: Optional[Dict[str, Union[int, float]]] = None,\n loss_group_weights: Optional[List[float]] = None,\n groups_names: Optional[Dict[int, str]] = None,\n groups_names_num_features: Optional[Dict[str, int]] = None,\n num_edge_features: Optional[int] = None,\n main_metric_name: str = \"main_metric\",\n mlflow_experiment_name: Optional[str] = None,\n n_epochs: int = 10,\n scheduler_params: Dict[Literal[\"milestones\", \"gamma\"], int] = {\n \"milestones\": [10, 20, 35, 50, 70, 90, 105],\n \"gamma\": 0.25,\n },\n scheduler_type: str = \"MultiStepLR\",\n target_names: List[str] = [\"y\"],\n target_sizes: Optional[List[int]] = None,\n use_mlflow: bool = False,\n tqdm_disable=False,\n conv_type: Literal[\"NNConv\", \"GraphConv\"] = \"NNConv\",\n metrics: Optional[float] = None,\n log_all_metrics: bool = True,\n 
**model_params,\n ) -> None:\n \"\"\"\n Training model (GraphConv or NNConv).\n Class that training / logging / saving model. Using train_epoch\n and eval_epoch from helpers.py in training loop below.\n\n Args:\n list_loader_train (List[torch.utils.data.DataLoader]): Train list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n list_loader_test (List[torch.utils.data.DataLoader]): Test list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n checkpoint_dir (Union[str, pathlib.PosixPath]): Path for training checkpoints\n device (_type_, optional): The device is an object representing the device on\n which a torch.Tensor is or will be allocated.. Defaults to \"cuda:0\".\n eval_freq (int, optional): Number of epoch group. Defaults to 5.\n fill_value (Union[int, float], optional): If value is None. Defaults to -100.\n initial_lr (float, optional): The learning rate param for Optimization. Defaults to 0.0023.\n weight_decay (float, optional): weight decay (L2 penalty). Defaults to 0.001.\n loss_name (str, optional): This criterion computes the cross entropy loss between\n input logits and target. Defaults to \"CrossEntropyLoss\".\n https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html\n loss_label_smoothing (bool, optional): If set True, use label smoothing. Defaults to False.\n loss_target_weights (Optional[Dict[str, Union[int, float]]], optional): Weights for targets. Defaults to None.\n loss_group_weights (Optional[List[float]], optional): Weights for groups. Defaults to None.\n groups_names (Optional[Dict[int, str]], optional): List with group names in nodes. Defaults to None.\n groups_names_num_features (Optional[Dict[str, int]], optional): Number of feats in groups in nodes. Defaults to None.\n num_edge_features (Optional[int], optional): Number of feats on edges. Defaults to None.\n main_metric_name (str, optional): Main metric for maximaze. Defaults to \"main_metric\".\n mlflow_experiment_name (Optional[str], optional): Name of mlflow experiment. Defaults to None.\n n_epochs (int, optional): Number of epochs. Defaults to 10.\n scheduler_params (Dict, optional): Milestones (list) – List of epoch indices. Must be increasing.\n gamma (float) – Multiplicative factor of learning rate decay.\n Defaults to { \"milestones\": [10, 20, 35, 50, 70, 90, 105], \"gamma\": 0.25, }.\n scheduler_type (str, optional): Decays the learning rate of each parameter group\n by gamma once the number of epoch reaches one of the milestones. Defaults to \"MultiStepLR\".\n target_names (List[str], optional): List of target names. Defaults to [\"y\"].\n target_sizes (Optional[List[int]], optional): Size of list with target. Defaults to None.\n use_mlflow (bool, optional): If set True, use MLFlow. Defaults to False.\n tqdm_disable (bool, optional): Display progress. Defaults to False.\n conv_type (Literal[NNConv, GraphConv], optional): The graph neural network operator. Defaults to \"NNConv\".\n metrics (float, optional): Metrics. Defaults to None.\n log_all_metrics (bool, optional): If set True, logging all metrics. 
Defaults to True.\n\n Raises:\n NotImplementedError: _description_\n \"\"\"\n for key, value in locals().items():\n setattr(self, key, value)\n\n self._metrics = {}\n self._main_metric = {}\n if isinstance(metrics, str):\n metrics = [metrics]\n if isinstance(\n metrics,\n (\n list,\n tuple,\n ),\n ):\n metrics = {name: metrics for name in target_names}\n\n for k, names in metrics.items():\n self._metrics[k] = {name: get_metric(name) for name in names}\n self._main_metric[k] = names[0]\n\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n torch.cuda.empty_cache()\n gc.collect()\n\n if conv_type == \"NNConv\":\n self._model = NNConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n elif conv_type == \"GraphConv\":\n self._model = GraphConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n else:\n raise NotImplementedError(f\"{conv_type} is not implemented\")\n\n self._model.to(device)\n\n self._optimizer = torch.optim.Adam(\n self._model.parameters(),\n lr=initial_lr,\n weight_decay=weight_decay,\n )\n\n self._loss_criteria = getattr(torch.nn, loss_name)(\n reduction=\"none\", label_smoothing=loss_label_smoothing\n )\n self._use_edge_attr = conv_type == \"NNConv\"\n\n self._scheduler = getattr(torch.optim.lr_scheduler, scheduler_type)(\n self._optimizer, **scheduler_params\n )\n\n self._best_loss = {main_metric_name: -np.inf}\n\n self._train_run_lst = []\n self._test_metric_lst = []\n self._train_metric_lst = []\n\n def train(\n self, start_epoch: int = 0, end_epoch: Optional[int] = None\n ) -> Dict[\n Literal[\n \"best_loss\", \"global_calc_time\", \"train_loss\", \"test_metric\", \"train_metric\"\n ],\n float,\n ]:\n \"\"\"\n Training model and logging metrics.\n \"\"\"\n if end_epoch is None:\n end_epoch = self.n_epochs\n\n self.global_start_time = time.time()\n\n if self.use_mlflow:\n mlflow.end_run()\n mlflow.set_experiment(self.mlflow_experiment_name)\n mlflow.start_run()\n mlflow.log_params(\n {\n \"LossCriteria\": self._loss_criteria,\n \"checkpoint_dir\": self.checkpoint_dir,\n **self.model_params,\n }\n )\n\n for epoch in range(start_epoch, end_epoch):\n self.epoch = epoch\n # TRAIN\n train_run = train_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self._optimizer,\n self._use_edge_attr,\n target_weights=self.loss_target_weights,\n loss_criteria=self._loss_criteria,\n group_weights=self.loss_group_weights,\n tqdm_disable=self.tqdm_disable,\n )\n train_run[\"lr\"] = self._optimizer.param_groups[0][\"lr\"]\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_run, \"run_\"), step=epoch\n )\n train_run[\"epoch\"] = epoch\n self._train_run_lst.append(train_run)\n with open(\n os.path.join(self.checkpoint_dir, \"train_running_loss.txt\"), \"a\"\n ) as f:\n json.dump(train_run, f)\n f.write(\"\\n\")\n\n # calc metrics and perform scheduler step\n if (epoch - 0) % self.eval_freq == 0:\n # calc metrics\n # test\n logger.info(\"\\nEpoch {:03d}: \".format(epoch))\n test_metric = eval_epoch(\n self._model,\n self.list_loader_test,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"test\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n fill_value=self.fill_value,\n metrics=self._metrics,\n 
main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(test_metric, \"test_\"), step=epoch\n )\n test_metric[\"epoch\"] = epoch\n self._test_metric_lst.append(test_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"test_metric.txt\"), \"a\"\n ) as f:\n json.dump(test_metric, f)\n f.write(\"\\n\")\n\n # train\n logger.info(\"Epoch {:03d}: \".format(epoch))\n train_metric = eval_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"train\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n metrics=self._metrics,\n main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_metric, \"train_\"), step=epoch\n )\n train_metric[\"epoch\"] = epoch\n self._train_metric_lst.append(train_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"train_metric.txt\"), \"a\"\n ) as f:\n json.dump(train_metric, f)\n f.write(\"\\n\")\n\n # save model\n checkpoint_file = os.path.join(\n self.checkpoint_dir, f\"state_dict_{epoch:0>4d}.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n\n if (\n test_metric[self.main_metric_name]\n > self._best_loss[self.main_metric_name]\n ):\n self._best_loss = test_metric\n self._best_loss[\"epoch\"] = epoch\n checkpoint_file = os.path.join(\n self.checkpoint_dir, \"state_dict_best.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n with open(\n os.path.join(self.checkpoint_dir, \"best_loss.txt\"), \"w\"\n ) as f:\n json.dump(self._best_loss, f, indent=4)\n\n self.mlflow_log_metrics(\n {\n \"best_epoch\": self._best_loss[\"epoch\"],\n f\"best_{self.main_metric_name}\": self._best_loss[\n self.main_metric_name\n ],\n },\n step=epoch,\n )\n\n if self.scheduler_type == \"ReduceLROnPlateau\":\n self._scheduler.step(train_run[\"total_loss\"])\n if (\n self._optimizer.param_groups[0][\"lr\"]\n <= self.scheduler_params[\"min_lr\"]\n ):\n break\n else:\n self._scheduler.step()\n\n self.global_calc_time = time.time() - self.global_start_time\n train_loss = pd.DataFrame(self._train_run_lst)\n test_metric = pd.DataFrame(self._test_metric_lst)\n train_metric = pd.DataFrame(self._train_metric_lst)\n\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(self._best_loss, \"best_\")\n )\n self.mlflow_log_metrics({\"global_calc_time\": self.global_calc_time})\n\n if self.use_mlflow:\n mlflow.end_run()\n torch.cuda.empty_cache()\n\n return {\n \"best_loss\": self._best_loss,\n \"global_calc_time\": self.global_calc_time,\n \"train_loss\": train_loss,\n \"test_metric\": test_metric,\n \"train_metric\": train_metric,\n }\n\n def mlflow_log_metrics(\n self, metrics: Dict[str, Any], step: Optional[int] = None\n ) -> None:\n if self.use_mlflow:\n try:\n mlflow.log_metrics(metrics, step)\n except MlflowException as e:\n save_str_e = traceback.format_exc()\n logger.info(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )\n with open(\n os.path.join(self.checkpoint_dir, \"MlflowExceptions.txt\"), \"a\"\n ) as f:\n f.write(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )" } ]
import os import pathlib import hydra import numpy as np import optuna import pandas as pd import torch from datetime import datetime from itertools import product from pathlib import Path from typing import Dict, List, Literal, Optional from hydra import ( compose, core, initialize, initialize_config_dir, initialize_config_module, ) from omegaconf import DictConfig, OmegaConf from optuna.trial import TrialState from sklearn.model_selection import train_test_split from torch_geometric.data import Data from torch_geometric.loader import NeighborLoader, NeighborSampler from tqdm import tqdm from cool_graph.data import RawDataProcessor from cool_graph.data.batch import get_auto_batch_size from cool_graph.data.loaders import create_loaders from cool_graph.logging import setup_mlflow_from_config from cool_graph.parameter_search import ( model_params_to_trial_params, sample_model_params, ) from cool_graph.train import Trainer
12112
print(" Params: ") for i in trial_dataset["number"].tolist(): if trial_dataset["value"][i] == trial_dataset["value"].max(): print(dict_with_params[i]) return trial_dataset class MultiRunner: """ Runner for heterogeneous graph Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (list): Indices for train data. Defaults to None. test_idx (list): Indices for test data. Defaults to None. """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, **kwargs, ) -> None: if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/full.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) cfg = OmegaConf.to_container(config, resolve=True) self.cfg = cfg self.data = data self.test_size = test_size self.train_size = train_size self.seed = seed self.train_idx = train_idx self.test_idx = test_idx self.node_feature_indices = cfg["data"]["node_feature_indices"] self.target_names = cfg["training"]["targets"] self.groups_names = cfg["data"]["groups_names"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.read_edge_attr = cfg["data"].get("read_edge_attr", True) self.batch_size = cfg["training"]["batch_size"] self.group_mask_col = cfg["data"]["group_mask_col"] self.label_mask_col = cfg["data"]["label_mask_col"] self.label_cols = cfg["data"]["label_cols"] self.label_index_col = cfg["data"]["label_index_col"] self.edge_index_cols = cfg["data"]["edge_index_cols"] self.num_neighbors = cfg["training"]["num_neighbors"] self.features_edges_names = cfg["data"].get("features_edges") self.group_names_node_features = cfg["data"]["features"] self.metrics = cfg["metrics"] self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) os.makedirs(self.chkpt_dir, exist_ok=True) for k, v in kwargs.items(): setattr(self, k, v) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [len(v) for _, v in self.group_names_node_features.items()], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], 
edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=len(self.cfg["data"].get("features_edges", [])), device=self.cfg["training"]["device"], num_neighbors=self.cfg["training"]["num_neighbors"], ) else: self._batch_size = self.batch_size if (self.train_idx is None) or (self.test_idx is None): train_idx, test_idx = train_test_split( torch.nonzero(self.data.label_mask)[:, 0], train_size=self.train_size, test_size=self.test_size, random_state=self.seed, shuffle=True, ) self.train_idx = train_idx self.test_idx = test_idx unique_groups = np.unique(self.data.group_mask)
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict: assert path_base in ("cfg", "cwd") core.global_hydra.GlobalHydra.instance().clear() if os.path.isabs(config): config_path = pathlib.Path(config).parent else: config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent config_name = pathlib.Path(config).name.replace(".yaml", "") initialize_config_dir(str(config_path), version_base=None) cfg = compose(config_name=config_name, overrides=overrides) return cfg class ConfigRunner: r"""Runner for cli mode. Using only in cli. This class allows to load data + split data per batchs + split data per train/val + training. See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ def __init__(self, config: Optional[DictConfig]) -> None: cfg = OmegaConf.to_container(config, resolve=True) self.cfg = cfg self.target_names = cfg["training"]["targets"] self.groups_names = cfg["data"]["groups_names"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.read_edge_attr = cfg["data"].get("read_edge_attr", True) self.batch_size = cfg["training"]["batch_size"] self.group_mask_col = cfg["data"]["group_mask_col"] self.label_mask_col = cfg["data"]["label_mask_col"] self.label_cols = cfg["data"]["label_cols"] self.label_index_col = cfg["data"]["label_index_col"] self.edge_index_cols = cfg["data"]["edge_index_cols"] self.num_neighbors = cfg["training"]["num_neighbors"] self.features_edges_names = cfg["data"].get("features_edges") self.group_names_node_features = cfg["data"]["features"] self.train_paths = cfg["data"]["train"] self.val_paths = cfg["data"]["validation"] self.metrics = cfg["metrics"] self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) os.makedirs(self.chkpt_dir, exist_ok=True) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Using RawDataProcessor from cool_graph.data for preprocessing data from disk. """ self.train_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.train_paths["nodes_path"], mon_edges_path=self.train_paths["edges_path"], mon_labels_path=self.train_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) self.val_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.val_paths["nodes_path"], mon_edges_path=self.val_paths["edges_path"], mon_labels_path=self.val_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) def sample_data( self, seed=0 ) -> Dict[Literal["train", "validation"], List[torch.utils.data.DataLoader]]: """ Sampling data in batches. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [len(v) for _, v in self.group_names_node_features.items()], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=len(self.cfg["data"].get("features_edges", [])), device=self.cfg["training"]["device"], num_neighbors=self.cfg["training"]["num_neighbors"], ) else: self._batch_size = self.batch_size train_loaders = self.train_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) val_loaders = self.val_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) return {"train": train_loaders, "validation": val_loaders} def run(self, seed: int = 0) -> Dict[str, float]: """ Train model for train_samples and val_sampler. Args: seed (int): seed for training. Default to 0. Returns: result (dict): Result of training for each 5 epochs with metrics from config. """ if not (hasattr(self, "train_sampler") and hasattr(self, "val_sampler")): self.init_loaders() sampled = self.sample_data(seed=seed) train_loaders = sampled["train"] val_loaders = sampled["validation"] self.trainer = Trainer( train_loaders, val_loaders, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.cfg["training"]["loss"].get("target_weights"), loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.cfg["data"]["groups_names"], mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.cfg["training"]["targets"], use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, **self.cfg["model_params"], groups_names_num_features={ k: len(v) for k, v in self.group_names_node_features.items() }, num_edge_features=len(self.cfg["data"].get("features_edges", [])), metrics=self.metrics, ) result = self.trainer.train() return result class BaseRunner: def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ) -> None: """ Main class for Basic runner and Runner with Optuna. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. 
https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (list): Indices for train data. Defaults to None. test_idx (list): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. """ if config is None: if config_path is None: if use_edge_attr: config_path = "./config/in_memory_data2.yaml" else: config_path = "./config/in_memory_data.yaml" config_path = os.path.join(os.path.dirname(__file__), config_path) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) cfg = OmegaConf.to_container(config, resolve=True) self.data = data self.cfg = cfg self.test_size = test_size self.train_size = train_size self.seed = seed self.train_idx = train_idx self.test_idx = test_idx self.use_edge_attr = use_edge_attr if use_edge_attr and data.edge_attr is None: raise BaseException( "data does not contain edge_attr, please set use_edge_attr=False" ) self.target_names = cfg["training"]["targets"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.batch_size = cfg["training"]["batch_size"] self.num_neighbors = cfg["training"]["num_neighbors"] self.metrics = cfg["metrics"] self.data.group_mask = torch.zeros(len(data.x), dtype=torch.int8) self.data.label_mask = torch.ones(len(data.x), dtype=torch.bool) self.groups_names = {0: "x"} self.groups_names_num_features = {"x": data.x.shape[1]} if len(data.y.shape) == 2: self.target_sizes = [] self.target_names = [] self.target_weights = {} for i in range(data.y.shape[1]): y_sub = data.y[:, i] setattr(data, f"y{i}", y_sub) self.target_sizes.append(len(y_sub.unique())) self.target_names.append(f"y{i}") self.target_weights[f"y{i}"] = 1 else: self.target_names = ["y"] self.target_sizes = [len(data.y.unique())] self.target_weights = {"y": 1} if use_edge_attr: self.num_edge_features = data.edge_attr.shape[1] else: self.num_edge_features = 0 self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) for k, v in kwargs.items(): setattr(self, k, v) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Sampling data into batches and sampling data with NeighborLoader into list loaders. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [ self.groups_names_num_features[self.groups_names[i]] for i in range(len(self.groups_names)) ], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=self.num_edge_features, device=self.cfg["training"]["device"], num_neighbors=self.num_neighbors, ) else: self._batch_size = self.batch_size if (self.train_idx is None) or (self.test_idx is None): train_idx, test_idx = train_test_split( torch.nonzero(self.data.label_mask)[:, 0], train_size=self.train_size, test_size=self.test_size, random_state=self.seed, shuffle=True, ) self.train_idx = train_idx self.test_idx = test_idx def sample_date_prerpoc(sampled_data: Data) -> Data: sampled_data.label_mask[sampled_data.batch_size :] = False for group, name in self.groups_names.items(): x = getattr(sampled_data, name)[sampled_data.group_mask == group] setattr(sampled_data, name, x) return sampled_data loader_train = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.train_idx, ) list_loader_train = [] for sampled_data in tqdm(loader_train, desc="Sample data"): list_loader_train.append(sample_date_prerpoc(sampled_data)) self.train_loader = list_loader_train loader_test = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.test_idx, ) list_loader_test = [] for sampled_data in tqdm(loader_test, desc="Sample data"): list_loader_test.append(sample_date_prerpoc(sampled_data)) self.test_loader = list_loader_test class Runner(BaseRunner): """ Runner for notebook launch. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (int): Indices for train data. Defaults to None. test_idx (int): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. 
Examples -------- >>> from cool_graph.runners import Runner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = Runner(data) >>> result = runner.run() >>> result["best_loss"] {'accuracy': 0.916, 'cross_entropy': 0.286, 'f1_micro': 0.916, 'calc_time': 0.004, 'main_metric': 0.916, 'epoch': 10} Also you can override params in Runner: runner = Runner(data, metrics=['accuracy'], batch_size='auto', train_size=0.7, test_size=0.3, overrides=['training.n_epochs=1'], config_path=path/to/config) result = runner.run() """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, use_edge_attr, **kwargs, ) def run(self) -> Dict[str, float]: """ Training model with params in_memory_data/in_memory_data2 config. See the configs in ./config for knowing what excactly using as logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() self.trainer = Trainer( self.train_loader, self.test_loader, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.target_weights, loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() return result class HypeRunner(BaseRunner): """ Runner for optimization model with Optuna. https://optuna.readthedocs.io/en/stable/reference/index.html 1st trial - with default config params (hyper_params). Also, 2nd trial - you can add own trial as argument enqueue_trial in optimazire_run method, and next trial optuna optimize model params randomly, if set None randomly optimization after 1st default trial. Args: data (Data): Loaded dataset. config (DictConfig): Confif with patams (model_params, logging, training, metrics). Default to None. config_path (str): Path with config structure (can be loaded with cli get_config). Default to None. overrides (list): Own params in list. Default to None. train_size (int): Own train size. Default to None. 
test (int): Own test size. Default to None. seed (int): The desired seed. Default to None. train_idx (list): List of train indices. test_idx (list): List of test indices. Examples -------- >>> from cool_graph.runners import HypeRunner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = HypeRunner(data) >>> result = runner.run(optimize_run) Study statistics: Number of finished trials: 5 Number of complete trials: 5 Best trial: Value: 0.922 Params: {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4, 'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2, 'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2, 'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2, 'graph_conv_weight_norm_flag': True} """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, ) if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/in_memory_data.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) self.study = optuna.study def optimize_run( self, n_trials: int = 100, storage: Optional[str] = None, study_name: Optional[str] = None, enqueue_trial: Optional[List[Dict]] = None, ) -> pd.DataFrame: if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() """ Method for running objective function in Optuna. Args: n_trials (int, optional): The number of trials for each process. None represents no limit in terms of the number of trials. Defaults to 100. storage (Optional[str], optional): Database URL. If this argument is set to None, in-memory storage is used, and the Study will not be persistent. Defaults to None. study_name (Optional[str], optional): Study name. If this argument is set to None, a unique name is generated automatically. Defaults to None. enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter values. Defaults to None. Returns: trials_dataset (pd.DataFrame): Result dataframe with trial params. 
""" list_with_params = [] def objective(trial) -> float: self.cfg["model_params"] = sample_model_params( trial, conv_type=self.cfg["model_params"]["conv_type"] ) list_with_params.append(self.cfg["model_params"]) self.trainer = Trainer( self.train_loader, self.test_loader, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.target_weights, loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get( "mlflow_experiment_name" ), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() output = result["best_loss"]["main_metric"] output = round(output, 3) return output # default params for the 1st trial in Optuna optimization trial_params = model_params_to_trial_params(**self.cfg["model_params"]) trial_params["weight_decay"] = self.cfg["training"].get("weight_decay", 0.0) self.study = optuna.create_study( storage=storage, study_name=study_name, direction="maximize", load_if_exists=True, sampler=optuna.samplers.RandomSampler(seed=120), ) # adding a trial_params as a default one to optuna optimization self.study.enqueue_trial(trial_params) # users params for the 2nd trial, # if None use optuna random params to trial if enqueue_trial: for param in enqueue_trial: user_params = model_params_to_trial_params(**param) self.study.enqueue_trial(user_params) self.study.optimize( objective, n_trials=n_trials, n_jobs=1, show_progress_bar=False ) complete_trials = self.study.get_trials( deepcopy=False, states=[TrialState.COMPLETE] ) print("Study statistics: ") print(" Number of finished trials: ", len(self.study.trials)) print(" Number of complete trials: ", len(complete_trials)) trial = self.study.best_trial dict_with_params = dict(enumerate(list_with_params)) print("Best trial:") print(" Value: ", trial.value) trials_dataset = self.study.trials_dataframe() trials_dataset = trials_dataset[ [ "number", "value", "datetime_start", "datetime_complete", "duration", "system_attrs_fixed_params", "state", ] ] trial_dataset = pd.concat( [trials_dataset, pd.DataFrame(dict_with_params).T], axis=1 ) print(" Params: ") for i in trial_dataset["number"].tolist(): if trial_dataset["value"][i] == trial_dataset["value"].max(): print(dict_with_params[i]) return trial_dataset class MultiRunner: """ Runner for heterogeneous graph Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. 
https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (list): Indices for train data. Defaults to None. test_idx (list): Indices for test data. Defaults to None. """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, **kwargs, ) -> None: if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/full.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) cfg = OmegaConf.to_container(config, resolve=True) self.cfg = cfg self.data = data self.test_size = test_size self.train_size = train_size self.seed = seed self.train_idx = train_idx self.test_idx = test_idx self.node_feature_indices = cfg["data"]["node_feature_indices"] self.target_names = cfg["training"]["targets"] self.groups_names = cfg["data"]["groups_names"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.read_edge_attr = cfg["data"].get("read_edge_attr", True) self.batch_size = cfg["training"]["batch_size"] self.group_mask_col = cfg["data"]["group_mask_col"] self.label_mask_col = cfg["data"]["label_mask_col"] self.label_cols = cfg["data"]["label_cols"] self.label_index_col = cfg["data"]["label_index_col"] self.edge_index_cols = cfg["data"]["edge_index_cols"] self.num_neighbors = cfg["training"]["num_neighbors"] self.features_edges_names = cfg["data"].get("features_edges") self.group_names_node_features = cfg["data"]["features"] self.metrics = cfg["metrics"] self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) os.makedirs(self.chkpt_dir, exist_ok=True) for k, v in kwargs.items(): setattr(self, k, v) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [len(v) for _, v in self.group_names_node_features.items()], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=len(self.cfg["data"].get("features_edges", [])), device=self.cfg["training"]["device"], num_neighbors=self.cfg["training"]["num_neighbors"], ) else: self._batch_size = self.batch_size if (self.train_idx is None) or (self.test_idx is None): train_idx, test_idx = train_test_split( torch.nonzero(self.data.label_mask)[:, 0], train_size=self.train_size, test_size=self.test_size, random_state=self.seed, shuffle=True, ) self.train_idx = train_idx self.test_idx = test_idx unique_groups = 
np.unique(self.data.group_mask)
self.train_loader = create_loaders(
2
2023-11-22 09:44:16+00:00
16k
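The cropped code in the record above builds an Optuna study that first enqueues a hand-picked default configuration (and optionally user-supplied ones) and only then lets a seeded RandomSampler propose new trials, maximizing a rounded metric returned by the Trainer-based objective. A minimal, self-contained sketch of that enqueue-then-optimize pattern follows; the toy objective and its two hyperparameter names are assumptions standing in for the cropped Trainer call, not the repository's actual search space.

import optuna
from optuna.trial import TrialState

def objective(trial: optuna.Trial) -> float:
    # Toy stand-in for the Trainer-based objective above: sample two
    # hypothetical hyperparameters and return a score to maximize.
    lr = trial.suggest_float("initial_lr", 1e-4, 1e-1, log=True)
    weight_decay = trial.suggest_float("weight_decay", 0.0, 1e-2)
    return round(1.0 - abs(lr - 0.01) - weight_decay, 3)

study = optuna.create_study(
    direction="maximize",
    sampler=optuna.samplers.RandomSampler(seed=120),
)
# Enqueue a default trial so the first evaluation reproduces known settings,
# mirroring study.enqueue_trial(trial_params) in the code above.
study.enqueue_trial({"initial_lr": 0.01, "weight_decay": 0.0})
study.optimize(objective, n_trials=5, n_jobs=1, show_progress_bar=False)

complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
print("Number of finished trials:", len(study.trials))
print("Number of complete trials:", len(complete_trials))
print("Best value:", study.best_trial.value, "Best params:", study.best_trial.params)

Enqueuing the known-good defaults guarantees the search never starts worse than the baseline configuration, which is why the record adds the default trial_params (and any user-provided parameter sets) before calling optimize.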
HeliosZhao/Animate124
guidance/cn_utils.py
[ { "identifier": "save_videos_grid", "path": "nerf/utils.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, n_rows=8, fps=8, **kwargs):\n ## videos b f c h w\n videos = rearrange(videos, \"b t c h w -> t b c h w\")\n outputs = []\n for x in videos:\n # x: b,c,h,w\n x = make_grid(x, nrow=n_rows) # c,h,w?\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1) # h,w,3 or h,w?\n outputs.append(x.detach().cpu().numpy())\n\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "save_tensor2image", "path": "nerf/utils.py", "snippet": "def save_tensor2image(x: torch.Tensor, path, channel_last=False, quality=75, **kwargs):\n # assume the input x is channel last\n # ipdb.set_trace()\n # if x.ndim == 4:\n # if channel_last:\n # x = x.permute(0, 3, 1, 2) \n # TF.to_pil_image(make_grid(x, value_range=(0, 1), **kwargs)).save(path, quality=quality)\n if x.ndim == 5:\n ## video\n # ipdb.set_trace()\n path = os.path.splitext(path)[0] + '.mp4' # convert image to mp4\n # B,F,C,H,W or B,F,H,W,C\n if channel_last: # B,F,H,W,C\n x = rearrange(x, \"b f h w c -> b f c h w\")\n save_videos_grid(x, path, **kwargs)\n else:\n if channel_last:\n x = x.permute(0, 3, 1, 2) \n TF.to_pil_image(make_grid(x, value_range=(0, 1), **kwargs)).save(path, quality=quality)" }, { "identifier": "StableDiffusionControlNetImg2ImgPipeline", "path": "controlnet/stable_diffusion_controlnet_img2img.py", "snippet": "class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):\n \"\"\"\n Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/\n \"\"\"\n\n _optional_components = [\"safety_checker\", \"feature_extractor\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n super().__init__()\n\n if safety_checker is None and requires_safety_checker:\n logger.warning(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n if safety_checker is not None and feature_extractor is None:\n raise ValueError(\n \"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety\"\n \" checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.\"\n )\n\n if isinstance(controlnet, (list, tuple)):\n controlnet = MultiControlNetModel(controlnet)\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n controlnet=controlnet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.register_to_config(requires_safety_checker=requires_safety_checker)\n # import ipdb\n # ipdb.set_trace()\n\n def enable_vae_slicing(self):\n r\"\"\"\n Enable sliced VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several\n steps. This is useful to save some memory and allow larger batch sizes.\n \"\"\"\n self.vae.enable_slicing()\n\n def disable_vae_slicing(self):\n r\"\"\"\n Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_slicing()\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,\n text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n Note that offloading happens on a submodule basis. Memory savings are higher than with\n `enable_model_cpu_offload`, but performance is lower.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:\n cpu_offload(cpu_offloaded_model, device)\n\n if self.safety_checker is not None:\n cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)\n\n def enable_model_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. 
Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n # the safety checker can offload the vae again\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # control net hook has be manually offloaded as it alternates with unet\n cpu_offload_with_hook(self.controlnet, device)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook\n\n @property\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. After calling\n `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module\n hooks.\n \"\"\"\n if not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n do_classifier_free_guidance (`bool`):\n whether to use classifier free guidance or not\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n \"\"\"\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance and negative_prompt_embeds is None:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = prompt_embeds.shape[1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n negative_prompt_embeds = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n negative_prompt_embeds = negative_prompt_embeds[0]\n\n if do_classifier_free_guidance:\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n\n negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n return prompt_embeds\n\n def run_safety_checker(self, image, device, dtype):\n if self.safety_checker is not None:\n safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors=\"pt\").to(device)\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(dtype)\n )\n else:\n has_nsfw_concept = None\n return image, has_nsfw_concept\n\n def decode_latents(self, latents):\n latents = 1 / self.vae.config.scaling_factor * latents\n image = self.vae.decode(latents).sample\n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n return image\n\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds):\n image_is_pil = isinstance(image, PIL.Image.Image)\n image_is_tensor = isinstance(image, torch.Tensor)\n image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n raise TypeError(\n \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of 
torch tensors\"\n )\n\n if image_is_pil:\n image_batch_size = 1\n elif image_is_tensor:\n image_batch_size = image.shape[0]\n elif image_is_pil_list:\n image_batch_size = len(image)\n elif image_is_tensor_list:\n image_batch_size = len(image)\n else:\n raise ValueError(\"controlnet condition image is not valid\")\n\n if prompt is not None and isinstance(prompt, str):\n prompt_batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n prompt_batch_size = len(prompt)\n elif prompt_embeds is not None:\n prompt_batch_size = prompt_embeds.shape[0]\n else:\n raise ValueError(\"prompt or prompt_embeds are not valid\")\n\n if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n raise ValueError(\n f\"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n )\n\n def check_inputs(\n self,\n prompt,\n image,\n controlnet_conditioning_image,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n strength=None,\n controlnet_guidance_start=None,\n controlnet_guidance_end=None,\n controlnet_conditioning_scale=None,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. 
Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n\n # check controlnet condition image\n\n if isinstance(self.controlnet, ControlNetModel):\n self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds)\n elif isinstance(self.controlnet, MultiControlNetModel):\n if not isinstance(controlnet_conditioning_image, list):\n raise TypeError(\"For multiple controlnets: `image` must be type `list`\")\n\n if len(controlnet_conditioning_image) != len(self.controlnet.nets):\n raise ValueError(\n \"For multiple controlnets: `image` must have the same length as the number of controlnets.\"\n )\n\n for image_ in controlnet_conditioning_image:\n self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds)\n else:\n assert False\n\n # Check `controlnet_conditioning_scale`\n\n if isinstance(self.controlnet, ControlNetModel):\n if not isinstance(controlnet_conditioning_scale, float):\n raise TypeError(\"For single controlnet: `controlnet_conditioning_scale` must be type `float`.\")\n elif isinstance(self.controlnet, MultiControlNetModel):\n if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(\n self.controlnet.nets\n ):\n raise ValueError(\n \"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have\"\n \" the same length as the number of controlnets\"\n )\n else:\n assert False\n\n if isinstance(image, torch.Tensor):\n if image.ndim != 3 and image.ndim != 4:\n raise ValueError(\"`image` must have 3 or 4 dimensions\")\n\n if image.ndim == 3:\n image_batch_size = 1\n image_channels, image_height, image_width = image.shape\n elif image.ndim == 4:\n image_batch_size, image_channels, image_height, image_width = image.shape\n else:\n assert False\n\n if image_channels != 3:\n raise ValueError(\"`image` must have 3 channels\")\n\n if image.min() < -1 or image.max() > 1:\n raise ValueError(\"`image` should be in range [-1, 1]\")\n\n if self.vae.config.latent_channels != self.unet.config.in_channels:\n raise ValueError(\n f\"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received\"\n f\" latent channels: {self.vae.config.latent_channels},\"\n f\" Please verify the config of `pipeline.unet` and the `pipeline.vae`\"\n )\n\n if strength < 0 or strength > 1:\n raise ValueError(f\"The value of `strength` should in [0.0, 1.0] but is {strength}\")\n\n if controlnet_guidance_start < 0 or controlnet_guidance_start > 1:\n raise ValueError(\n f\"The value of `controlnet_guidance_start` should in [0.0, 1.0] but is {controlnet_guidance_start}\"\n )\n\n if controlnet_guidance_end < 0 or controlnet_guidance_end > 1:\n raise ValueError(\n f\"The value of `controlnet_guidance_end` should in [0.0, 1.0] but is {controlnet_guidance_end}\"\n )\n\n if controlnet_guidance_start > controlnet_guidance_end:\n raise ValueError(\n \"The value of `controlnet_guidance_start` should be less than `controlnet_guidance_end`, but got\"\n f\" `controlnet_guidance_start` {controlnet_guidance_start} >= `controlnet_guidance_end` {controlnet_guidance_end}\"\n )\n\n def get_timesteps(self, num_inference_steps, strength, 
device):\n # get the original timestep using init_timestep\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n\n t_start = max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start:]\n\n return timesteps, num_inference_steps - t_start\n\n def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n image = image.to(device=device, dtype=dtype)\n\n batch_size = batch_size * num_images_per_prompt\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.sample(generator)\n\n init_latents = self.vae.config.scaling_factor * init_latents\n\n if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:\n raise ValueError(\n f\"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.\"\n )\n else:\n init_latents = torch.cat([init_latents], dim=0)\n\n shape = init_latents.shape\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n\n # get latents\n init_latents = self.scheduler.add_noise(init_latents, noise, timestep)\n latents = init_latents\n\n return latents\n\n def _default_height_width(self, height, width, image):\n if isinstance(image, list):\n image = image[0]\n\n if height is None:\n if isinstance(image, PIL.Image.Image):\n height = image.height\n elif isinstance(image, torch.Tensor):\n height = image.shape[3]\n\n height = (height // 8) * 8 # round down to nearest multiple of 8\n\n if width is None:\n if isinstance(image, PIL.Image.Image):\n width = image.width\n elif isinstance(image, torch.Tensor):\n width = image.shape[2]\n\n width = (width // 8) * 8 # round down to nearest multiple of 8\n\n return height, width\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n controlnet_prompt: Union[str, List[str]] = None,\n image: Union[torch.Tensor, PIL.Image.Image] = None,\n controlnet_conditioning_image: Union[\n torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]\n ] = None,\n strength: float = 0.8,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n 
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,\n controlnet_guidance_start: float = 0.0,\n controlnet_guidance_end: float = 1.0,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n image (`torch.Tensor` or `PIL.Image.Image`):\n `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will\n be masked out with `mask_image` and repainted according to `prompt`.\n controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):\n The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If\n the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can\n also be accepted as an image. The control image is automatically resized to fit the output image.\n strength (`float`, *optional*):\n Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`\n will be used as a starting point, adding more noise to it the larger the `strength`. The number of\n denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will\n be maximum and the denoising process will run for the full number of iterations specified in\n `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):\n The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added\n to the residual in the original unet.\n controlnet_guidance_start ('float', *optional*, defaults to 0.0):\n The percentage of total steps the controlnet starts applying. Must be between 0 and 1.\n controlnet_guidance_end ('float', *optional*, defaults to 1.0):\n The percentage of total steps the controlnet ends applying. Must be between 0 and 1. Must be greater\n than `controlnet_guidance_start`.\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n # 0. Default height and width to unet\n height, width = self._default_height_width(height, width, controlnet_conditioning_image)\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n image,\n controlnet_conditioning_image,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n strength,\n controlnet_guidance_start,\n controlnet_guidance_end,\n controlnet_conditioning_scale,\n )\n\n # 2. 
Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)\n\n # 3. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n )\n\n if controlnet_prompt is None:\n controlnet_prompt_embeds = prompt_embeds\n else:\n controlnet_prompt_embeds = self._encode_prompt(\n controlnet_prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=None,\n negative_prompt_embeds=negative_prompt_embeds,\n )\n # import ipdb\n # ipdb.set_trace()\n # 4. Prepare image, and controlnet_conditioning_image\n image = prepare_image(image) # -1,1 for vae # 1,3,H,W\n\n # condition image(s)\n if isinstance(self.controlnet, ControlNetModel):\n controlnet_conditioning_image = prepare_controlnet_conditioning_image(\n controlnet_conditioning_image=controlnet_conditioning_image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=self.controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n ) # 0-1 concate -> 2,3,H,W\n elif isinstance(self.controlnet, MultiControlNetModel):\n controlnet_conditioning_images = []\n\n for image_ in controlnet_conditioning_image:\n image_ = prepare_controlnet_conditioning_image(\n controlnet_conditioning_image=image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=self.controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n )\n\n controlnet_conditioning_images.append(image_)\n\n controlnet_conditioning_image = controlnet_conditioning_images\n else:\n assert False\n\n # 5. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n\n # 6. Prepare latent variables\n latents = self.prepare_latents(\n image,\n latent_timestep,\n batch_size,\n num_images_per_prompt,\n prompt_embeds.dtype,\n device,\n generator,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # compute the percentage of total steps we are at\n current_sampling_percent = i / len(timesteps)\n\n if (\n current_sampling_percent < controlnet_guidance_start\n or current_sampling_percent > controlnet_guidance_end\n ):\n # do not apply the controlnet\n down_block_res_samples = None\n mid_block_res_sample = None\n else:\n # apply the controlnet\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n latent_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=controlnet_conditioning_image,\n conditioning_scale=controlnet_conditioning_scale,\n return_dict=False,\n )\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n ).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if output_type == \"latent\":\n image = latents\n has_nsfw_concept = None\n elif output_type == \"pil\":\n # 8. Post-processing\n image = self.decode_latents(latents)\n\n # 9. Run safety checker\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n\n # 10. Convert to PIL\n image = self.numpy_to_pil(image)\n else:\n # 8. Post-processing\n image = self.decode_latents(latents)\n\n # 9. Run safety checker\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" } ]
from typing import List, Optional, Sequence, Tuple, Union, Mapping from dataclasses import dataclass from torch.cuda.amp import custom_bwd, custom_fwd from torch import Tensor from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler, DDIMScheduler, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DiffusionPipeline, ControlNetModel from diffusers.utils.import_utils import is_xformers_available, is_torch_version from os.path import isfile from pathlib import Path from PIL import Image from torchvision.io import read_image from torchvision import transforms from torchvision.transforms import functional as TVF from torchvision.utils import make_grid, save_image from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTokenizer, CLIPProcessor from einops import rearrange from nerf.utils import save_videos_grid, save_tensor2image from controlnet.stable_diffusion_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from easydict import EasyDict as edict import os import torch import torch.nn.functional as F import torch.nn as nn import torch.nn.functional as F import numpy as np import logging import torch import argparse import matplotlib.pyplot as plt import glob
13,515
# add noise noise = torch.randn_like(latents) latents_noisy = self.scheduler.add_noise(latents, noise, t) # pred noise latent_model_input = torch.cat([latents_noisy] * 2) down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=cn_text_embeddings, controlnet_cond=controlnet_conditioning_image, conditioning_scale=self.cn_scale, return_dict=False, ) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # perform guidance (high scale from paper!) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_text + guidance_scale * \ (noise_pred_text - noise_pred_uncond) grad_clipd = 0 # ipdb.set_trace() w = (1 - self.alphas[t])[:, None, None, None] # 1,1,1,1 # w = self.alphas[t] ** 0.5 * (1 - self.alphas[t]) # grad_sds = w * (noise_pred - noise) grad_sds = grad_scale * w * (noise_pred - noise) loss_sds = grad_sds.abs().mean().detach() grad_clipd = 0. loss_clipd = 0. grad = grad_clipd + grad_sds if grad_clip is not None: grad = grad.clamp(-grad_clip, grad_clip) grad = torch.nan_to_num(grad) if self.new_sds: targets = (latents - grad).detach() # if self.opt.grad_dyn: targets = ( (latents_noisy - (1 - self.alphas[t]) ** (0.5) * noise_pred) / self.alphas[t] ** (0.5) ).detach() if self.opt.get('mean_sds', False): loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='mean') # * grad_scale else: loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] # * grad_scale # B,4,H,W -> sum over 4,H,W else: # grad = grad * grad_scale # grad = torch.nan_to_num(grad) latents.backward(gradient=grad, retain_graph=True) loss = grad.abs().mean().detach() # B,4,H,W all mean if not enable_clip: loss_sds = loss if save_guidance_path: with torch.no_grad(): # save original input images = [] os.makedirs(os.path.dirname(save_guidance_path), exist_ok=True) timesteps = torch.arange(50, 1000, 300, dtype=torch.long, device=self.device) for t in timesteps: # ipdb.set_trace() if as_latent: pred_rgb_512 = self.decode_latents(latents, video_generation) latents_noisy = self.scheduler.add_noise(latents, noise, t) # pred noise latent_model_input = torch.cat([latents_noisy] * 2) down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=cn_text_embeddings, controlnet_cond=controlnet_conditioning_image, conditioning_scale=self.cn_scale, return_dict=False, ) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample ).sample # noise_pred = self.unet(latent_model_input, t, # encoder_hidden_states=text_embeddings).sample # perform guidance (high scale from paper!) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_text + guidance_scale * \ (noise_pred_text - noise_pred_uncond) pred_original_sample = self.decode_latents((latents_noisy - (1 - self.alphas[t]) ** (0.5) * noise_pred) / self.alphas[t] ** (0.5)) # visualize predicted denoised image # claforte: discuss this with Vikram!! 
result_hopefully_less_noisy_image = self.decode_latents(latents - w*grad_scale*(noise_pred - noise)) # visualize noisier image result_noisier_image = self.decode_latents(latents_noisy) viz_image_list = [pred_rgb_512, controlnet_conditioning_image.chunk(2,dim=0)[0], pred_original_sample, result_hopefully_less_noisy_image] viz_image_list = [rearrange(_img, "(b f) c h w -> b c f h w", f=num_frame) for _img in viz_image_list] image = torch.cat(viz_image_list, dim=0) images.append(image) n_rows = images[0].size(0) if video_generation: viz_images = torch.cat(images, dim=0).detach().mul(255).to(torch.uint8) # b,c,f,h,w
logger = logging.getLogger(__name__) def spherical_dist_loss(x, y): x = F.normalize(x, dim=-1) y = F.normalize(y, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def to_pil(x: torch.Tensor, **kwargs) -> Image.Image: return TVF.to_pil_image(make_grid(x, value_range=(0, 1), **kwargs)) def to_np_img(x: torch.Tensor) -> np.ndarray: return (x.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).round().astype(np.uint8) class SpecifyGradient(torch.autograd.Function): @staticmethod @custom_fwd def forward(ctx, input_tensor, gt_grad): ctx.save_for_backward(gt_grad) # we return a dummy value 1, which will be scaled by amp's scaler so we get the scale in backward. return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype) @staticmethod @custom_bwd def backward(ctx, grad_scale): gt_grad, = ctx.saved_tensors gt_grad = gt_grad * grad_scale return gt_grad, None def token_replace(prompt, negative, learned_embeds_path): # Set up automatic token replacement for prompt if '<token>' in prompt or '<token>' in negative: if learned_embeds_path is None: raise ValueError( '--learned_embeds_path must be specified when using <token>') tmp = list(torch.load(learned_embeds_path, map_location='cpu').keys()) if len(tmp) != 1: raise ValueError( 'Something is wrong with the dict passed in for --learned_embeds_path') token = tmp[0] prompt = prompt.replace('<token>', token) negative = negative.replace('<token>', token) logger.info(f'Prompt after replacing <token>: {prompt}') logger.info(f'Negative prompt after replacing <token>: {negative}') return prompt, negative @dataclass class UNet2DConditionOutput: # Not sure how to check what unet_traced.pt contains, and user wants. HalfTensor or FloatTensor sample: torch.HalfTensor def enable_vram(pipe): pipe.enable_sequential_cpu_offload() pipe.enable_vae_slicing() pipe.unet.to(memory_format=torch.channels_last) pipe.enable_attention_slicing(1) # pipe.enable_model_cpu_offload() def get_model_path(sd_version='2.1', clip_version='large', hf_key=None): if hf_key is not None: logger.info(f'[INFO] using hugging face custom model key: {hf_key}') sd_path = hf_key elif sd_version == '2.1': sd_path = "checkpoints/stable-diffusion-2-1-base" elif sd_version == '2.0': sd_path = "checkpoints/stable-diffusion-2-base" elif sd_version == '1.5': sd_path = "checkpoints/stable-diffusion-v1-5" elif 'zeroscope' in sd_version: sd_path = "checkpoints/" + sd_version else: raise ValueError( f'Stable-diffusion version {sd_version} not supported.') if clip_version == 'base': clip_path = "openai/clip-vit-base-patch32" else: clip_path = "openai/clip-vit-large-patch14" return sd_path, clip_path def check_within(x, low, high): if low is None and high is None: return False elif low is None: if x <= high: return True elif high is None: if x >= low: return True elif x >= low and x <= high: return True else: return False return False class ControlNetStableDiffusion(nn.Module): def __init__(self, opt, device, fp16, vram_O, sd_version='2.1', hf_key=None, t_range=[0.02, 0.98], use_clip=False, clip_version='base', clip_iterative=True, clip_t=0.4, **kwargs ): super().__init__() self.device = device self.vram_O = vram_O self.fp16 = fp16 self.opt = opt self.new_sds = opt.get('new_sds', False) self.cn_size = opt.cn_size self.cn_scale = opt.cn_scale logger.info(f'[INFO] loading stable diffusion...') sd_path, clip_path = get_model_path('1.5', clip_version, None) self.precision_t = torch.float16 if fp16 else torch.float32 self.sd_version = sd_path # Create model ## NOTE only sd 1.5 is 
supported sd_pipe = DiffusionPipeline.from_pretrained( sd_path, torch_dtype=self.precision_t, local_files_only=False) controlnet = ControlNetModel.from_pretrained(opt.cn_key, torch_dtype=self.precision_t, local_files_only=True) pipe = StableDiffusionControlNetImg2ImgPipeline( vae=sd_pipe.vae, text_encoder=sd_pipe.text_encoder, tokenizer=sd_pipe.tokenizer, unet=sd_pipe.unet, controlnet=controlnet, scheduler=sd_pipe.scheduler, safety_checker=sd_pipe.safety_checker, feature_extractor=sd_pipe.feature_extractor, requires_safety_checker=False ).to(device, self.precision_t) self.vae = pipe.vae self.tokenizer = pipe.tokenizer self.text_encoder = pipe.text_encoder self.unet = pipe.unet self.controlnet = pipe.controlnet if kwargs.get('learned_embeds_path', None) is not None: learned_embeds_path = kwargs['learned_embeds_path'] pipe.load_textual_inversion(learned_embeds_path) # ipdb.set_trace() if vram_O: # this will change device from gpu to other types (meta) enable_vram(pipe) else: if is_xformers_available(): pipe.enable_xformers_memory_efficient_attention() pipe.to(device) # if is_torch_version(">=", "2.0.0"): # pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) self.scheduler = DDIMScheduler.from_pretrained( sd_path, subfolder="scheduler", torch_dtype=self.precision_t, local_files_only=False) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.min_step = int(self.num_train_timesteps * t_range[0]) self.max_step = int(self.num_train_timesteps * t_range[1]) self.alphas = self.scheduler.alphas_cumprod.to( self.device) # for convenience logger.info(f'[INFO] loaded stable diffusion!') # for CLIP self.use_clip = use_clip if self.use_clip: #breakpoint() self.clip_model = CLIPModel.from_pretrained(clip_path).to(device) image_processor = CLIPProcessor.from_pretrained(clip_path).image_processor self.image_processor = transforms.Compose([ transforms.Resize((image_processor.crop_size['height'], image_processor.crop_size['width'])), transforms.Normalize(image_processor.image_mean, image_processor.image_std), ]) for p in self.clip_model.parameters(): p.requires_grad = False self.clip_iterative = clip_iterative self.clip_t = int(self.num_train_timesteps * clip_t) @torch.no_grad() def get_text_embeds(self, prompt): # Tokenize text and get embeddings text_input = self.tokenizer( prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] return text_embeddings @torch.no_grad() def get_all_text_embeds(self, prompt): # Tokenize text and get embeddings text_input = self.tokenizer( prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt') text_embeddings = self.text_encoder(text_input.input_ids.to(self.device)) # text_z = text_z / text_z.norm(dim=-1, keepdim=True) # return all text embeddings and class embeddings return torch.cat([text_embeddings[0], text_embeddings[1].unsqueeze(1)], dim=1) # @torch.no_grad() def get_clip_img_embeds(self, img): img = self.image_processor(img) image_z = self.clip_model.get_image_features(img) image_z = image_z / image_z.norm(dim=-1, keepdim=True) # normalize features return image_z def clip_loss(self, ref_z, pred_rgb): image_z = self.get_clip_img_embeds(pred_rgb) loss = spherical_dist_loss(image_z, ref_z) return loss def set_epoch(self, epoch): self.epoch = epoch def noise_sample(self, step=None, shape=1): return torch.randint(self.min_step, self.max_step+1, 
(shape,), dtype=torch.long) def train_step(self, text_embeddings, cn_text_embeddings, pred_rgb, cn_rgb, guidance_scale=100, as_latent=False, grad_clip=None, grad_scale=1.0, image_ref_clip=None, text_ref_clip=None, clip_guidance=100, clip_image_loss=False, density=None, save_guidance_path=None, step=None, first_frame=None, depth=None, # b,f,1,h,w, 0-1 start_from_zero=True, ): enable_clip = self.use_clip and clip_guidance > 0 and not as_latent enable_sds = True sd_size = self.cn_size cn_rgb = cn_rgb.detach() video_generation = False batch_size = pred_rgb.size(0) if pred_rgb.ndim == 5: # B,F,C,H,W video_generation = True num_frame = pred_rgb.size(1) pred_rgb = rearrange(pred_rgb, "b f c h w -> (b f) c h w") cn_rgb = rearrange(cn_rgb, "b f c h w -> (b f) c h w") # interp to 512x512 to be fed into vae. ## both should be 0-1 here pred_rgb_512 = F.interpolate( pred_rgb, (sd_size, sd_size), mode='bilinear', align_corners=False) controlnet_conditioning_image = F.interpolate( cn_rgb, (sd_size, sd_size), mode='bilinear', align_corners=False) controlnet_conditioning_image = torch.cat([controlnet_conditioning_image]*2, dim=0) # ipdb.set_trace() if text_embeddings.size(0) != num_frame*2: text_embeddings = torch.cat([text_embeddings[:1].repeat(num_frame, 1, 1), text_embeddings[1:].repeat(num_frame, 1, 1)], dim=0) if cn_text_embeddings.size(0) != num_frame*2: cn_text_embeddings = torch.cat([cn_text_embeddings[:1].repeat(num_frame, 1, 1), cn_text_embeddings[1:].repeat(num_frame, 1, 1)], dim=0) # ipdb.set_trace() # encode image into latents with vae, requires grad! latents = self.encode_imgs(pred_rgb_512) t = self.noise_sample(step=step, shape=batch_size).to(self.device) ## here should we keep a same noise t each frame? Not very necessary? if enable_clip and self.clip_iterative: if t > self.clip_t: enable_clip = False else: enable_sds = False # predict the noise residual with unet, NO grad! with torch.no_grad(): # add noise noise = torch.randn_like(latents) latents_noisy = self.scheduler.add_noise(latents, noise, t) # pred noise latent_model_input = torch.cat([latents_noisy] * 2) down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=cn_text_embeddings, controlnet_cond=controlnet_conditioning_image, conditioning_scale=self.cn_scale, return_dict=False, ) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # perform guidance (high scale from paper!) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_text + guidance_scale * \ (noise_pred_text - noise_pred_uncond) grad_clipd = 0 # ipdb.set_trace() w = (1 - self.alphas[t])[:, None, None, None] # 1,1,1,1 # w = self.alphas[t] ** 0.5 * (1 - self.alphas[t]) # grad_sds = w * (noise_pred - noise) grad_sds = grad_scale * w * (noise_pred - noise) loss_sds = grad_sds.abs().mean().detach() grad_clipd = 0. loss_clipd = 0. 
grad = grad_clipd + grad_sds if grad_clip is not None: grad = grad.clamp(-grad_clip, grad_clip) grad = torch.nan_to_num(grad) if self.new_sds: targets = (latents - grad).detach() # if self.opt.grad_dyn: targets = ( (latents_noisy - (1 - self.alphas[t]) ** (0.5) * noise_pred) / self.alphas[t] ** (0.5) ).detach() if self.opt.get('mean_sds', False): loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='mean') # * grad_scale else: loss = 0.5 * F.mse_loss(latents.float(), targets, reduction='sum') / latents.shape[0] # * grad_scale # B,4,H,W -> sum over 4,H,W else: # grad = grad * grad_scale # grad = torch.nan_to_num(grad) latents.backward(gradient=grad, retain_graph=True) loss = grad.abs().mean().detach() # B,4,H,W all mean if not enable_clip: loss_sds = loss if save_guidance_path: with torch.no_grad(): # save original input images = [] os.makedirs(os.path.dirname(save_guidance_path), exist_ok=True) timesteps = torch.arange(50, 1000, 300, dtype=torch.long, device=self.device) for t in timesteps: # ipdb.set_trace() if as_latent: pred_rgb_512 = self.decode_latents(latents, video_generation) latents_noisy = self.scheduler.add_noise(latents, noise, t) # pred noise latent_model_input = torch.cat([latents_noisy] * 2) down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=cn_text_embeddings, controlnet_cond=controlnet_conditioning_image, conditioning_scale=self.cn_scale, return_dict=False, ) noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample ).sample # noise_pred = self.unet(latent_model_input, t, # encoder_hidden_states=text_embeddings).sample # perform guidance (high scale from paper!) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_text + guidance_scale * \ (noise_pred_text - noise_pred_uncond) pred_original_sample = self.decode_latents((latents_noisy - (1 - self.alphas[t]) ** (0.5) * noise_pred) / self.alphas[t] ** (0.5)) # visualize predicted denoised image # claforte: discuss this with Vikram!! result_hopefully_less_noisy_image = self.decode_latents(latents - w*grad_scale*(noise_pred - noise)) # visualize noisier image result_noisier_image = self.decode_latents(latents_noisy) viz_image_list = [pred_rgb_512, controlnet_conditioning_image.chunk(2,dim=0)[0], pred_original_sample, result_hopefully_less_noisy_image] viz_image_list = [rearrange(_img, "(b f) c h w -> b c f h w", f=num_frame) for _img in viz_image_list] image = torch.cat(viz_image_list, dim=0) images.append(image) n_rows = images[0].size(0) if video_generation: viz_images = torch.cat(images, dim=0).detach().mul(255).to(torch.uint8) # b,c,f,h,w
save_tensor2image(rearrange(viz_images, 'b c f h w -> b f c h w'), save_guidance_path, n_rows=n_rows)
1
2023-11-23 10:34:08+00:00
16k
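The train_step in the record above turns the ControlNet-conditioned noise prediction into a score-distillation (SDS) update on the VAE latents: w(t) = 1 - alpha_bar_t, grad = grad_scale * w * (noise_pred - noise), and, on the new_sds path, the gradient is folded into an MSE loss against a detached target (the record subsequently replaces that target with the predicted x0; the simpler (latents - grad) form is kept here). The sketch below isolates just that step under stated assumptions: the tensor shapes and the random placeholder noise_pred are made up, since the real values come from the UNet plus ControlNet.

import torch
import torch.nn.functional as F

def sds_loss(latents, noise, noise_pred, alphas_cumprod, t, grad_scale=1.0, mean_sds=False):
    # w(t) = 1 - alpha_bar_t, broadcast over (B, C, H, W) as in the record.
    w = (1 - alphas_cumprod[t])[:, None, None, None]
    grad = torch.nan_to_num(grad_scale * w * (noise_pred - noise))
    # Detached target so autograd pushes grad back into the latents
    # (scaled by 1/B under the 'sum' reduction used above).
    targets = (latents - grad).detach()
    if mean_sds:
        return 0.5 * F.mse_loss(latents.float(), targets, reduction="mean")
    return 0.5 * F.mse_loss(latents.float(), targets, reduction="sum") / latents.shape[0]

# Toy shapes standing in for 64x64 VAE latents and scheduler quantities.
B = 2
latents = torch.randn(B, 4, 64, 64, requires_grad=True)
noise = torch.randn_like(latents)
noise_pred = torch.randn_like(latents)  # would come from UNet + ControlNet in the record
alphas_cumprod = torch.linspace(0.999, 0.01, 1000)
t = torch.randint(20, 980, (B,))
sds_loss(latents, noise, noise_pred, alphas_cumprod, t).backward()

Expressing the update as an MSE against a detached target (rather than calling latents.backward(gradient=grad) directly, the record's non-new_sds branch) keeps the step compatible with standard loss aggregation and mixed-precision scaling.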
alexzhou907/DreamPropeller
threestudio/systems/base.py
[ { "identifier": "Exporter", "path": "threestudio/models/exporters/base.py", "snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError" }, { "identifier": "ExporterOutput", "path": "threestudio/models/exporters/base.py", "snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]" }, { "identifier": "parse_optimizer", "path": "threestudio/systems/utils.py", "snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim" }, { "identifier": "parse_scheduler", "path": "threestudio/systems/utils.py", "snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler" }, { "identifier": "Updateable", "path": "threestudio/utils/base.py", "snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False, exceptions=[]\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\") or any([ex in attr for ex in exceptions]):\n continue\n \n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def do_update_step_end(self, epoch: int, global_step: int, exceptions=[]):\n for attr in self.__dir__():\n if attr.startswith(\"_\") or any([ex in attr for ex in exceptions]):\n continue\n try:\n module 
= getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)\n self.update_step_end(epoch, global_step)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass\n\n def update_step_end(self, epoch: int, global_step: int):\n pass" }, { "identifier": "update_end_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)" }, { "identifier": "update_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)" }, { "identifier": "parse_structured", "path": "threestudio/utils/config.py", "snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg" }, { "identifier": "C", "path": "threestudio/utils/misc.py", "snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value" }, { "identifier": "cleanup", "path": "threestudio/utils/misc.py", "snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()" }, { "identifier": "get_device", "path": "threestudio/utils/misc.py", "snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")" }, { "identifier": "load_module_weights", "path": "threestudio/utils/misc.py", "snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n 
return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]" }, { "identifier": "SaverMixin", "path": "threestudio/utils/saving.py", "snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 
0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] 
== \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n fs = 30\n font = ImageFont.truetype(\"load/times-new-roman.ttf\", fs)\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2+30, i*fs + 1 +20), f\"{text}\", white, font=font)\n draw.text((0+30, i*fs + 1 +20), f\"{text}\", white, font=font)\n draw.text((2+30, i*fs - 1 +20), f\"{text}\", white, font=font)\n draw.text((0+30, i*fs - 1 +20), f\"{text}\", white, font=font)\n draw.text((1+30, i*fs +20), f\"{text}\", black, font=font)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n 
imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = \"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n 
v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n 
name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self, *args, **kwargs) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() 
- 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and 
len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 
3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n 
self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" } ]
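Among the snippets quoted in the context list above is threestudio's `C` scalar-schedule helper. As a quick illustration of the interpolation it performs, here is a minimal standalone sketch; the name `scheduled_value` is ours, and the OmegaConf conversion the real helper does via `config_to_primitive` is omitted.

```python
# Minimal sketch of the linear ramp performed by the C() helper quoted above
# (the real helper also converts OmegaConf nodes via config_to_primitive).
def scheduled_value(value, epoch: int, global_step: int) -> float:
    if isinstance(value, (int, float)):
        return float(value)                      # plain scalars pass through unchanged
    if len(value) == 3:                          # [start_value, end_value, end_step] -> start_step defaults to 0
        value = [0] + list(value)
    start_step, start_value, end_value, end_step = value
    # an int end_step ramps over global steps, a float end_step ramps over epochs
    current = global_step if isinstance(end_step, int) else epoch
    t = max(min(1.0, (current - start_step) / (end_step - start_step)), 0.0)
    return start_value + (end_value - start_value) * t

# e.g. a loss weight ramping 0 -> 1 over the first 1000 steps:
assert scheduled_value([0, 0.0, 1.0, 1000], epoch=0, global_step=250) == 0.25
```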
import os import pytorch_lightning as pl import torch.nn.functional as F import threestudio import torch import numpy as np import copy from dataclasses import dataclass, field from threestudio.models.exporters.base import Exporter, ExporterOutput from threestudio.systems.utils import parse_optimizer, parse_scheduler from threestudio.utils.base import ( Updateable, update_end_if_possible, update_if_possible, ) from threestudio.utils.config import parse_structured from threestudio.utils.misc import C, cleanup, get_device, load_module_weights from threestudio.utils.saving import SaverMixin from threestudio.utils.typing import * from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.utils.config import load_config, parse_structured
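The import block above pulls in `parse_optimizer` and `parse_scheduler`, whose snippets appear in the context. The sketch below shows the shape of config they consume according to those snippets: a `name` resolved against `torch.optim` (or a scheduler lookup) plus `args` forwarded as keyword arguments, and an optional `interval` that `parse_scheduler` echoes back in its return dict. The concrete names and values here are illustrative, not taken from the record.

```python
from omegaconf import OmegaConf

# Illustrative configs (not from the record) in the shape parse_optimizer() /
# parse_scheduler() expect: "name" plus "args" forwarded verbatim, and an optional
# "interval" ("epoch" or "step") returned alongside the scheduler.
system_cfg = OmegaConf.create({
    "optimizer": {"name": "Adam", "args": {"lr": 1.0e-3, "betas": [0.9, 0.99]}},
    "scheduler": {"name": "StepLR", "interval": "step", "args": {"step_size": 500, "gamma": 0.5}},
})
# optim = parse_optimizer(system_cfg.optimizer, model)   # -> torch.optim.Adam(...) per the snippet
# sched = parse_scheduler(system_cfg.scheduler, optim)   # -> {"scheduler": ..., "interval": "step"}
```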
12,828
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, device=get_device(), resumed=False, configure=True) -> None: super().__init__()
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, device=get_device(), resumed=False, configure=True) -> None: super().__init__()
self.cfg = parse_structured(self.Config, cfg)
7
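For this record, `gold_snippet_index` is 7, which (counting from zero) points at the `parse_structured` entry in the context list above, the very helper the ground-truth `next_line` calls. A minimal sketch of what that call does, following the quoted snippet; the `Config` dataclass and the checkpoint path below are stand-ins, not the real `BaseSystem.Config`.

```python
from dataclasses import dataclass
from typing import Optional
from omegaconf import OmegaConf

# parse_structured(fields, cfg) per the quoted snippet: build the dataclass from the
# raw cfg mapping, then wrap it as an OmegaConf structured config (typed, defaults filled).
def parse_structured(fields, cfg=None):
    return OmegaConf.structured(fields(**cfg))

@dataclass
class Config:                                  # stand-in for BaseSystem.Config
    weights: Optional[str] = None
    cleanup_after_validation_step: bool = False

cfg = parse_structured(Config, {"weights": "ckpts/last.ckpt"})   # illustrative path
print(cfg.weights, cfg.cleanup_after_validation_step)            # ckpts/last.ckpt False
```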
2023-11-27 23:39:49+00:00
16k
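That closes this record (level 16k). As a rough illustration of how such a record could be consumed, the sketch below assembles a retrieval-augmented next-line completion prompt from the fields shown above and checks a generation against `next_line`; the prompt format and the exact-match check are our assumptions, not something stated in the dump.

```python
# Hedged sketch: turn one record of this dump into a completion prompt and score it.
# Keys mirror the fields shown above; "record" is a hypothetical dict loaded from the dataset.
def build_prompt(record: dict, max_snippets: int = 3) -> str:
    retrieved = record["context"][:max_snippets]        # cross-file snippets, as listed above
    header = "\n\n".join(
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in retrieved
    )
    return f"{header}\n\n{record['import_statement']}\n{record['cropped_code']}"

def is_exact_match(generated_line: str, record: dict) -> bool:
    # compare against the ground-truth continuation stored in "next_line"
    return generated_line.strip() == record["next_line"].strip()
```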
CineMingle/CineMingle
Movie_Data_Capture.py
[ { "identifier": "get_data_from_json", "path": "scraper.py", "snippet": "def get_data_from_json(\n file_number: str,\n open_cc: opencc.OpenCC,\n specified_source: str, specified_url: str) -> typing.Optional[dict]:\n \n # iterate through all services and fetch the data 从网站上查询片名解析JSON返回元数据\n # :param file_number: 影片名称\n # :param open_cc: 简繁转换器\n # :param specified_source: 指定的媒体数据源\n # :param specified_url: 指定的数据查询地址, 目前未使用\n # :return 给定影片名称的具体信息\n \n try:\n actor_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_actor.xml'))\n info_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_info.xml'))\n except:\n actor_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n info_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n\n conf = config.getInstance()\n # default fetch order list, from the beginning to the end\n sources = conf.sources()\n\n # TODO 准备参数\n # - 清理 ADC_function, webcrawler\n proxies: dict = None\n config_proxy = conf.proxy()\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n\n # javdb website logic\n # javdb have suffix\n javdb_sites = conf.javdb_sites().split(',')\n for i in javdb_sites:\n javdb_sites[javdb_sites.index(i)] = \"javdb\" + i\n javdb_sites.append(\"javdb\")\n # 不加载过期的cookie,javdb登录界面显示为7天免登录,故假定cookie有效期为7天\n has_valid_cookie = False\n for cj in javdb_sites:\n javdb_site = cj\n cookie_json = javdb_site + '.json'\n cookies_dict, cookies_filepath = load_cookies(cookie_json)\n if isinstance(cookies_dict, dict) and isinstance(cookies_filepath, str):\n cdays = file_modification_days(cookies_filepath)\n if cdays < 7:\n javdb_cookies = cookies_dict\n has_valid_cookie = True\n break\n elif cdays != 9999:\n print(\n f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')\n if not has_valid_cookie:\n # get real random site from javdb_sites, because random is not really random when the seed value is known\n # 已经是没有这些随机数了\n # javdb_site = secrets.choice(javdb_sites)\n javdb_site = None\n javdb_cookies = None\n\n ca_cert = None\n if conf.cacert_file():\n ca_cert = conf.cacert_file()\n\n json_data = search(file_number, sources, proxies=proxies, verify=ca_cert,\n dbsite=javdb_site, dbcookies=javdb_cookies,\n morestoryline=conf.is_storyline(),\n specifiedSource=specified_source, specifiedUrl=specified_url,\n debug = conf.debug())\n # Return if data not found in all sources\n if not json_data:\n print('[-]Movie Number not found!')\n return None\n\n # 增加number严格判断,避免提交任何number,总是返回\"本橋実来 ADZ335\",这种返回number不一致的数据源故障\n # 目前选用number命名规则是javdb.com Domain Creation Date: 2013-06-19T18:34:27Z\n # 然而也可以跟进关注其它命名规则例如airav.wiki Domain Creation Date: 2019-08-28T07:18:42.0Z\n # 如果将来javdb.com命名规则下不同Studio出现同名碰撞导致无法区分,可考虑更换规则,更新相应的number分析和抓取代码。\n if str(json_data.get('number')).upper() != file_number.upper():\n try:\n if json_data.get('allow_number_change'):\n pass\n except:\n print('[-]Movie number has changed! 
[{}]->[{}]'.format(file_number, str(json_data.get('number'))))\n return None\n\n # ================================================网站规则添加结束================================================\n\n if json_data.get('title') == '':\n print('[-]Movie Number or Title not found!')\n return None\n\n title = json_data.get('title')\n actor_list = str(json_data.get('actor')).strip(\"[ ]\").replace(\"'\", '').split(',') # 字符串转列表\n actor_list = [actor.strip() for actor in actor_list] # 去除空白\n director = json_data.get('director')\n release = json_data.get('release')\n number = json_data.get('number')\n studio = json_data.get('studio')\n source = json_data.get('source')\n runtime = json_data.get('runtime')\n outline = json_data.get('outline')\n label = json_data.get('label')\n series = json_data.get('series')\n year = json_data.get('year')\n\n if json_data.get('cover_small'):\n cover_small = json_data.get('cover_small')\n else:\n cover_small = ''\n\n if json_data.get('trailer'):\n trailer = json_data.get('trailer')\n else:\n trailer = ''\n\n if json_data.get('extrafanart'):\n extrafanart = json_data.get('extrafanart')\n else:\n extrafanart = ''\n\n imagecut = json_data.get('imagecut')\n tag = str(json_data.get('tag')).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '').split(',') # 字符串转列表 @\n while 'XXXX' in tag:\n tag.remove('XXXX')\n while 'xxx' in tag:\n tag.remove('xxx')\n if json_data['source'] =='pissplay': # pissplay actor为英文名,不用去除空格\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '')\n else:\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '')\n\n # if imagecut == '3':\n # DownloadFileWithFilename()\n\n # ====================处理异常字符====================== #\\/:*?\"<>|\n actor = special_characters_replacement(actor)\n actor_list = [special_characters_replacement(a) for a in actor_list]\n title = special_characters_replacement(title)\n label = special_characters_replacement(label)\n outline = special_characters_replacement(outline)\n series = special_characters_replacement(series)\n studio = special_characters_replacement(studio)\n director = special_characters_replacement(director)\n tag = [special_characters_replacement(t) for t in tag]\n release = release.replace('/', '-')\n tmpArr = cover_small.split(',')\n if len(tmpArr) > 0:\n cover_small = tmpArr[0].strip('\\\"').strip('\\'')\n # ====================处理异常字符 END================== #\\/:*?\"<>|\n\n # 返回处理后的json_data\n json_data['title'] = title\n json_data['original_title'] = title\n json_data['actor'] = actor\n json_data['release'] = release\n json_data['cover_small'] = cover_small\n json_data['tag'] = tag\n json_data['year'] = year\n json_data['actor_list'] = actor_list\n json_data['trailer'] = trailer\n json_data['extrafanart'] = extrafanart\n json_data['label'] = label\n json_data['outline'] = outline\n json_data['series'] = series\n json_data['studio'] = studio\n json_data['director'] = director\n\n if conf.is_translate():\n translate_values = conf.translate_values().split(\",\")\n for translate_value in translate_values:\n if json_data[translate_value] == \"\":\n continue\n if translate_value == \"title\":\n title_dict = json.loads(\n (Path.home() / '.local' / 'share' / 'mdc' / 'c_number.json').read_text(encoding=\"utf-8\"))\n try:\n json_data[translate_value] = title_dict[number]\n continue\n except:\n pass\n if conf.get_translate_engine() == \"azure\":\n t = translate(\n json_data[translate_value],\n target_language=\"zh-Hans\",\n engine=conf.get_translate_engine(),\n key=conf.get_translate_key(),\n )\n 
else:\n if len(json_data[translate_value]):\n if type(json_data[translate_value]) == str:\n json_data[translate_value] = special_characters_replacement(json_data[translate_value])\n json_data[translate_value] = translate(json_data[translate_value])\n else:\n for i in range(len(json_data[translate_value])):\n json_data[translate_value][i] = special_characters_replacement(\n json_data[translate_value][i])\n list_in_str = \",\".join(json_data[translate_value])\n json_data[translate_value] = translate(list_in_str).split(',')\n\n if open_cc:\n cc_vars = conf.cc_convert_vars().split(\",\")\n ccm = conf.cc_convert_mode()\n\n def convert_list(mapping_data, language, vars):\n total = []\n for i in vars:\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")) != 0:\n i = mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")[0]\n total.append(i)\n return total\n\n def convert(mapping_data, language, vars):\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)) != 0:\n return mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)[0]\n else:\n raise IndexError('keyword not found')\n\n for cc in cc_vars:\n if json_data[cc] == \"\" or len(json_data[cc]) == 0:\n continue\n if cc == \"actor\":\n try:\n if ccm == 1:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_cn\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_cn\", json_data['actor'])\n elif ccm == 2:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_tw\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_tw\", json_data['actor'])\n elif ccm == 3:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"jp\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"jp\", json_data['actor'])\n except:\n json_data['actor_list'] = [open_cc.convert(aa) for aa in json_data['actor_list']]\n json_data['actor'] = open_cc.convert(json_data['actor'])\n elif cc == \"tag\":\n try:\n if ccm == 1:\n json_data[cc] = convert_list(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert_list(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert_list(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n except:\n json_data[cc] = [open_cc.convert(t) for t in json_data[cc]]\n else:\n try:\n if ccm == 1:\n json_data[cc] = convert(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n except IndexError:\n json_data[cc] = open_cc.convert(json_data[cc])\n except:\n pass\n\n naming_rule = \"\"\n original_naming_rule = \"\"\n for i in conf.naming_rule().split(\"+\"):\n if i not in json_data:\n naming_rule += i.strip(\"'\").strip('\"')\n original_naming_rule += i.strip(\"'\").strip('\"')\n else:\n item = json_data.get(i)\n naming_rule += item if type(item) is not list else \"&\".join(item)\n # 
PATCH:处理[title]存在翻译的情况,后续NFO文件的original_name只会直接沿用naming_rule,这导致original_name非原始名\n # 理应在翻译处处理 naming_rule和original_naming_rule\n if i == 'title':\n item = json_data.get('original_title')\n original_naming_rule += item if type(item) is not list else \"&\".join(item)\n\n json_data['naming_rule'] = naming_rule\n json_data['original_naming_rule'] = original_naming_rule\n return json_data" }, { "identifier": "file_modification_days", "path": "ADC_function.py", "snippet": "def file_modification_days(filename: str) -> int:\n \"\"\"\n 文件修改时间距此时的天数\n \"\"\"\n mfile = Path(filename)\n if not mfile.is_file():\n return 9999\n mtime = int(mfile.stat().st_mtime)\n now = int(time.time())\n days = int((now - mtime) / (24 * 60 * 60))\n if days < 0:\n return 9999\n return days" }, { "identifier": "get_html", "path": "ADC_function.py", "snippet": "def get_html(url, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None, json_headers=None):\n \"\"\"\n 网页请求核心函数\n \"\"\"\n verify = config.getInstance().cacert_file()\n config_proxy = config.getInstance().proxy()\n errors = \"\"\n\n headers = {\"User-Agent\": ua or G_USER_AGENT} # noqa\n if json_headers is not None:\n headers.update(json_headers)\n\n for i in range(config_proxy.retry):\n try:\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, proxies=proxies,\n verify=verify,\n cookies=cookies)\n else:\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, cookies=cookies)\n\n if return_type == \"object\":\n return result\n elif return_type == \"content\":\n return result.content\n else:\n result.encoding = encoding or result.apparent_encoding\n return result.text\n except Exception as e:\n print(\"[-]Connect retry {}/{}\".format(i + 1, config_proxy.retry))\n errors = str(e)\n if \"getaddrinfo failed\" in errors:\n print(\"[-]Connect Failed! Please Check your proxy config\")\n debug = config.getInstance().debug()\n if debug:\n print(\"[-]\" + errors)\n else:\n print(\"[-]\" + errors)\n print('[-]Connect Failed! 
Please check your Proxy or Network!')\n raise Exception('Connect Failed')" }, { "identifier": "parallel_download_files", "path": "ADC_function.py", "snippet": "def parallel_download_files(dn_list: typing.Iterable[typing.Sequence], parallel: int = 0, json_headers=None):\n \"\"\"\n download files in parallel 多线程下载文件\n\n 用法示例: 2线程同时下载两个不同文件,并保存到不同路径,路径目录可未创建,但需要具备对目标目录和文件的写权限\n parallel_download_files([\n ('https://site1/img/p1.jpg', 'C:/temp/img/p1.jpg'),\n ('https://site2/cover/n1.xml', 'C:/tmp/cover/n1.xml')\n ])\n\n :dn_list: 可以是 tuple或者list: ((url1, save_fullpath1),(url2, save_fullpath2),) fullpath可以是str或Path\n :parallel: 并行下载的线程池线程数,为0则由函数自己决定\n \"\"\"\n mp_args = []\n for url, fullpath in dn_list:\n if url and isinstance(url, str) and url.startswith('http') \\\n and fullpath and isinstance(fullpath, (str, Path)) and len(str(fullpath)):\n fullpath = Path(fullpath)\n fullpath.parent.mkdir(parents=True, exist_ok=True)\n mp_args.append((url, fullpath, json_headers))\n if not len(mp_args):\n return []\n if not isinstance(parallel, int) or parallel not in range(1, 200):\n parallel = min(5, len(mp_args))\n with ThreadPoolExecutor(parallel) as pool:\n results = list(pool.map(download_one_file, mp_args))\n return results" }, { "identifier": "get_number", "path": "number_parser.py", "snippet": "def get_number(debug: bool, file_path: str) -> str:\n \"\"\"\n 从文件路径中提取番号 from number_parser import get_number\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/[脸肿字幕组][PoRO]牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」[720p][x264_aac].mp4\")\n '牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829-C.mp4\")\n 'snis-829'\n \"\"\"\n filepath = os.path.basename(file_path)\n # debug True 和 False 两块代码块合并,原因是此模块及函数只涉及字符串计算,没有IO操作,debug on时输出导致异常信息即可\n try:\n file_number = get_number_by_dict(filepath)\n if file_number:\n return file_number\n elif '字幕组' in filepath or 'SUB' in filepath.upper() or re.match(r'[\\u30a0-\\u30ff]+', filepath):\n filepath = G_spat.sub(\"\", filepath)\n filepath = re.sub(\"\\[.*?\\]\",\"\",filepath)\n filepath = filepath.replace(\".chs\", \"\").replace(\".cht\", \"\")\n file_number = str(re.findall(r'(.+?)\\.', filepath)).strip(\" [']\")\n return file_number\n elif '-' in filepath or '_' in filepath: # 普通提取番号 主要处理包含减号-和_的番号\n filepath = G_spat.sub(\"\", filepath)\n filename = str(re.sub(\"\\[\\d{4}-\\d{1,2}-\\d{1,2}\\] - \", \"\", filepath)) # 去除文件名中时间\n lower_check = filename.lower()\n if 'fc2' in lower_check:\n filename = lower_check.replace('--', '-').replace('_', '-').upper()\n filename = re.sub(\"[-_]cd\\d{1,2}\", \"\", filename, flags=re.IGNORECASE)\n if not re.search(\"-|_\", filename): # 去掉-CD1之后再无-的情况,例如n1012-CD1.wmv\n return str(re.search(r'\\w+', filename[:filename.find('.')], re.A).group())\n file_number = os.path.splitext(filename)\n print(file_number)\n filename = re.search(r'[\\w\\-_]+', filename, re.A)\n if filename:\n file_number = str(filename.group())\n 
else:\n file_number = file_number[0]\n file_number = re.sub(\"(-|_)c$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)uc$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)u$\", \"\", file_number, flags=re.IGNORECASE)\n if re.search(\"\\d+ch$\", file_number, flags=re.I):\n file_number = file_number[:-2]\n return file_number.upper()\n else: # 提取不含减号-的番号,FANZA CID\n # 欧美番号匹配规则\n oumei = re.search(r'[a-zA-Z]+\\.\\d{2}\\.\\d{2}\\.\\d{2}', filepath)\n if oumei:\n return oumei.group()\n try:\n return str(\n re.findall(r'(.+?)\\.',\n str(re.search('([^<>/\\\\\\\\|:\"\"\\\\*\\\\?]+)\\\\.\\\\w+$', filepath).group()))).strip(\n \"['']\").replace('_', '-')\n except:\n return str(re.search(r'(.+?)\\.', filepath)[0])\n except Exception as e:\n if debug:\n print(f'[-]Number Parser exception: {e} [{file_path}]')\n return None" }, { "identifier": "core_main", "path": "core.py", "snippet": "def core_main(movie_path, number_th, oCC, specified_source=None, specified_url=None):\n conf = config.getInstance()\n # =======================================================================初始化所需变量\n multi_part = False\n part = ''\n leak_word = ''\n c_word = ''\n cn_sub = False\n liuchu = False\n hack = False\n hack_word = ''\n _4k = False\n\n # 下面被注释的变量不需要\n # rootpath = os.getcwd\n number = number_th\n json_data = get_data_from_json(number, oCC, specified_source, specified_url) # 定义番号\n\n # Return if blank dict returned (data not found)\n if not json_data:\n moveFailedFolder(movie_path)\n return\n\n if json_data[\"number\"] != number:\n # fix issue #119\n # the root cause is we normalize the search id\n # print_files() will use the normalized id from website,\n # but paste_file_to_folder() still use the input raw search id\n # so the solution is: use the normalized search id\n number = json_data[\"number\"]\n imagecut = json_data.get('imagecut')\n tag = json_data.get('tag')\n # =======================================================================判断-C,-CD后缀\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n multi_part = True\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n\n # 判断是否无码\n unce = json_data.get('无码')\n uncensored = int(unce) if isinstance(unce, bool) else int(is_uncensored(number))\n\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n liuchu = '流出'\n leak = True\n leak_word = '-无码流出' # 流出影片后缀\n else:\n leak = False\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n if '4k'.upper() in str(movie_path).upper() or '4k' in movie_path:\n _4k = True\n\n # 判断是否4k\n if '4K' in tag:\n tag.remove('4K') # 从tag中移除'4K'\n\n # 判断是否为无码破解\n if '无码破解' in tag:\n tag.remove('无码破解') # 从tag中移除'无码破解'\n\n # try:\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n\n # 调试模式检测\n if conf.debug():\n debug_print(json_data)\n\n # 创建文件夹\n # path = create_folder(rootpath + '/' + conf.success_folder(), json_data.get('location_rule'), json_data)\n\n cover = json_data.get('cover')\n ext = image_ext(cover)\n\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = 
f\"{number}{leak_word}{c_word}{hack_word}-fanart{ext}\"\n poster_path = f\"{number}{leak_word}{c_word}{hack_word}-poster{ext}\"\n thumb_path = f\"{number}{leak_word}{c_word}{hack_word}-thumb{ext}\"\n\n # main_mode\n # 1: 刮削模式 / Scraping mode\n # 2: 整理模式 / Organizing mode\n # 3:不改变路径刮削\n if conf.main_mode() == 1:\n # 创建文件夹\n path = create_folder(json_data)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, thumb_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 移动电影\n paste_file_to_folder(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_status = move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n if move_status:\n cn_sub = True\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, thumb_path), cn_sub, leak, uncensored,\n hack, _4k)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path, tag,\n json_data.get('actor_list'), liuchu, uncensored, hack, hack_word\n , _4k, fanart_path, poster_path, thumb_path)\n\n elif conf.main_mode() == 2:\n # 创建文件夹\n path = create_folder(json_data)\n # 移动文件\n paste_file_to_folder_mode2(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n elif conf.main_mode() == 3:\n path = str(Path(movie_path).parent)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if 
conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, fanart_path), cn_sub, leak, uncensored, hack,\n _4k)\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path,\n tag, json_data.get('actor_list'), liuchu, uncensored, hack, hack_word, _4k, fanart_path, poster_path,\n thumb_path)" }, { "identifier": "core_main_no_net_op", "path": "core.py", "snippet": "def core_main_no_net_op(movie_path, number):\n conf = config.getInstance()\n part = ''\n leak_word = ''\n leak = False\n c_word = ''\n cn_sub = False\n hack = False\n hack_word = ''\n _4k = False\n imagecut = 1\n multi = False\n part = ''\n path = str(Path(movie_path).parent)\n\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n multi = True\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path or \".chs\" in movie_path or '.cht' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n uncensored = True if is_uncensored(number) else 0\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n leak_word = '-无码流出' # 无码流出影片后缀\n leak = True\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n # try:\n\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n prestr = f\"{number}{leak_word}{c_word}{hack_word}\"\n\n full_nfo = Path(path) / f\"{prestr}{part}.nfo\"\n if full_nfo.is_file():\n if full_nfo.read_text(encoding='utf-8').find(r'<tag>无码</tag>') >= 0:\n uncensored = True\n try:\n nfo_xml = etree.parse(full_nfo)\n nfo_fanart_path = nfo_xml.xpath('//fanart/text()')[0]\n ext = Path(nfo_fanart_path).suffix\n except:\n return\n else:\n return\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = f\"{prestr}-fanart{ext}\"\n poster_path = f\"{prestr}-poster{ext}\"\n thumb_path = f\"{prestr}-thumb{ext}\"\n full_fanart_path = os.path.join(path, fanart_path)\n full_poster_path = os.path.join(path, poster_path)\n full_thumb_path = os.path.join(path, thumb_path)\n\n if not all(os.path.isfile(f) for f in (full_fanart_path, full_thumb_path)):\n return\n\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n if conf.is_watermark():\n add_mark(full_poster_path, full_thumb_path, cn_sub, leak, 
uncensored, hack, _4k)\n\n if multi and conf.jellyfin_multi_part_fanart():\n linkImage(path, number, part, leak_word, c_word, hack_word, ext)" }, { "identifier": "moveFailedFolder", "path": "core.py", "snippet": "def moveFailedFolder(filepath):\n conf = config.getInstance()\n failed_folder = conf.failed_folder()\n link_mode = conf.link_mode()\n # 模式3或软连接,改为维护一个失败列表,启动扫描时加载用于排除该路径,以免反复处理\n # 原先的创建软连接到失败目录,并不直观,不方便找到失败文件位置,不如直接记录该文件路径\n if conf.main_mode() == 3 or link_mode:\n ftxt = os.path.abspath(os.path.join(failed_folder, 'failed_list.txt'))\n print(\"[-]Add to Failed List file, see '%s'\" % ftxt)\n with open(ftxt, 'a', encoding='utf-8') as flt:\n flt.write(f'{filepath}\\n')\n elif conf.failed_move() and not link_mode:\n failed_name = os.path.join(failed_folder, os.path.basename(filepath))\n mtxt = os.path.abspath(os.path.join(failed_folder, 'where_was_i_before_being_moved.txt'))\n print(\"'[-]Move to Failed output folder, see '%s'\" % mtxt)\n with open(mtxt, 'a', encoding='utf-8') as wwibbmt:\n tmstr = datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n wwibbmt.write(f'{tmstr} FROM[{filepath}]TO[{failed_name}]\\n')\n try:\n if os.path.exists(failed_name):\n print('[-]File Exists while moving to FailedFolder')\n return\n shutil.move(filepath, failed_name)\n except:\n print('[-]File Moving to FailedFolder unsuccessful!')" }, { "identifier": "debug_print", "path": "core.py", "snippet": "def debug_print(data: json):\n try:\n print(\"[+] ------- DEBUG INFO -------\")\n for i, v in data.items():\n if i == 'outline':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'characters')\n continue\n if i == 'actor_photo' or i == 'year':\n continue\n if i == 'extrafanart':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'links')\n continue\n print(f'[+] - {i:<{cn_space(i, 19)}} : {v}')\n\n print(\"[+] ------- DEBUG INFO -------\")\n except:\n pass" } ]
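The get_number tail in the context above strips subtitle/uncensored suffixes (-C, -UC, -U, and a trailing "<n>ch" channel tag) before upper-casing the ID, and falls back to a studio.YY.MM.DD pattern for Western releases. A minimal standalone sketch of that behaviour, assuming nothing beyond the regexes visible in the snippet (the helper name and the sample filenames below are illustrative, not part of the repository):

import re

def strip_id_suffixes(file_number: str) -> str:
    # Mirror of the suffix handling in get_number: drop -C / -UC / -U
    # (subtitle / uncensored markers) and a trailing "<n>ch" channel tag.
    for pattern in (r"(-|_)c$", r"(-|_)uc$", r"(-|_)u$"):
        file_number = re.sub(pattern, "", file_number, flags=re.IGNORECASE)
    if re.search(r"\d+ch$", file_number, flags=re.I):
        file_number = file_number[:-2]
    return file_number.upper()

# Western releases are matched by a studio.YY.MM.DD pattern instead.
western = re.compile(r"[a-zA-Z]+\.\d{2}\.\d{2}\.\d{2}")

assert strip_id_suffixes("abp-454-c") == "ABP-454"
assert strip_id_suffixes("abc-123_uc") == "ABC-123"
assert western.search("studioname.22.01.15.1080p.mp4").group() == "studioname.22.01.15"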
import argparse import json import os import random import re import sys import time import shutil import typing import urllib3 import signal import platform import config from datetime import datetime, timedelta from lxml import etree from pathlib import Path from opencc import OpenCC from scraper import get_data_from_json from ADC_function import file_modification_days, get_html, parallel_download_files from number_parser import get_number from core import core_main, core_main_no_net_op, moveFailedFolder, debug_print
13,417
is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # 短路布尔 符号链接不取stat(),因为符号链接可能指向不存在目标 continue # 模式不等于3下跳过软连接和未配置硬链接刮削 # 调试用0字节样本允许通过,去除小于120MB的广告'苍老师强力推荐.mp4'(102.2MB)'黑道总裁.mp4'(98.4MB)'有趣的妹子激情表演.MP4'(95MB)'有趣的臺灣妹妹直播.mp4'(15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # 同上 符号链接不取stat()及st_size,直接赋0跳过小视频检测 # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by it's .nfo which modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' who's .nfo modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # 软连接方式,已经成功削刮的也需要从成功目录中检查.nfo更新天数,跳过N天内更新过的 skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue number = get_number(False, f.stem) if not number: continue skip_numbers.add(number.lower()) rm_list = [] for f in total: n_number = get_number(False, os.path.basename(f)) if n_number and n_number.lower() in skip_numbers: rm_list.append(f) for f in rm_list: total.remove(f) if debug: print(f"[!]Skip file successfully processed within {nfo_skip_days} days: '{f}'") if len(rm_list): print( f"[!]Skip {len(rm_list)} movies in success folder '{success_folder}' who's .nfo modified within {nfo_skip_days} days.") return total def create_failed_folder(failed_folder: str): """ 新建failed文件夹 """ if not os.path.exists(failed_folder): try: os.makedirs(failed_folder) except: print(f"[-]Fatal error! Can not make folder '{failed_folder}'") os._exit(0) def rm_empty_folder(path): """ Recursively removes empty folders from a given path. This function is useful for cleaning up the directory structure by removing folders that no longer contain any files. :param path: The path where empty folders will be searched for and removed. """ abspath = os.path.abspath(path) deleted = set() for current_dir, subdirs, files in os.walk(abspath, topdown=False): try: still_has_subdirs = any(_ for subdir in subdirs if os.path.join(current_dir, subdir) not in deleted) if not any(files) and not still_has_subdirs and not os.path.samefile(path, current_dir): os.rmdir(current_dir) deleted.add(current_dir) print('[+]Deleting empty folder', current_dir) except: pass def create_data_and_move(movie_path: str, zero_op: bool, no_net_op: bool, oCC): """ Processes a movie file, generates necessary data, and moves the file to an appropriate directory based on the outcome. This function is central to the application's file processing logic, including scraping, organizing, and error handling. :param movie_path: Path of the movie file to be processed. :param zero_op: A boolean flag indicating whether to perform a dry run (no actual file operations). :param no_net_op: A boolean flag to indicate whether network operations are to be skipped. 
:param oCC: An OpenCC instance for language conversion, if required. """ # Normalized number, eg: 111xxx-222.mp4 -> xxx-222.mp4 skip_file_names = config.getInstance().skip_file_names() debug = config.getInstance().debug() n_number = get_number(debug, os.path.basename(movie_path)) movie_path = os.path.abspath(movie_path) # print(movie_path) for skip_name in skip_file_names: if skip_name in movie_path: print('[+]Skipping file:{}'.format(movie_path)) return if debug is True: print(f"[!] [{n_number}] As Number Processing for '{movie_path}'") if zero_op: return if n_number: if no_net_op: core_main_no_net_op(movie_path, n_number) else: core_main(movie_path, n_number, oCC) else: print("[-] number empty ERROR")
def check_update(local_version): """ Check for updates by comparing the local version of the application with the latest version available on GitHub. It fetches the latest release information from GitHub and compares the version numbers. If a new version is available, it prints out the update information. :param local_version: The current local version of the application. """ htmlcode = get_html("https://api.github.com/repos/CineMingle/CineMingle/releases/latest") data = json.loads(htmlcode) remote = int(data["tag_name"].replace(".", "")) local_version = int(local_version.replace(".", "")) if local_version < remote: print("[*]" + ("* New update " + str(data["tag_name"]) + " *").center(54)) print("[*]" + "↓ Download ↓".center(54)) print("[*]https://github.com/CineMingle/CineMingle/releases") print("[*]======================================================") def argparse_function(ver: str) -> typing.Tuple[str, str, str, str, bool, bool, str, str]: """ Parses command-line arguments and returns the parsed values. It sets up the argument parser with various options for the application and returns the parsed arguments and their values. It also loads configuration from a config file. :param ver: The version of the application, used for the version argument. :return: A tuple containing various parsed arguments and flags. """ conf = config.getInstance() parser = argparse.ArgumentParser(epilog=f"Load Config file '{conf.ini_path}'.") parser.add_argument("file", default='', nargs='?', help="Single Movie file path.") parser.add_argument("-p", "--path", default='movies', nargs='?', help="Analysis folder path.") parser.add_argument("-m", "--main-mode", default='', nargs='?', help="Main mode. 1:Scraping 2:Organizing 3:Scraping in analysis folder") parser.add_argument("-n", "--number", default='', nargs='?', help="Custom file number of single movie file.") # parser.add_argument("-C", "--config", default='config.ini', nargs='?', help="The config file Path.") parser.add_argument("-L", "--link-mode", default='', nargs='?', help="Create movie file link. 0:moving movie file, do not create link 1:soft link 2:try hard link first") default_logdir = str(Path.home() / '.mlogs') parser.add_argument("-o", "--log-dir", dest='logdir', default=default_logdir, nargs='?', help=f"""Duplicate stdout and stderr to logfiles in logging folder, default on. default folder for current user: '{default_logdir}'. Change default folder to an empty file, or use --log-dir= to turn log off.""") parser.add_argument("-q", "--regex-query", dest='regexstr', default='', nargs='?', help="python re module regex filepath filtering.") parser.add_argument("-d", "--nfo-skip-days", dest='days', default='', nargs='?', help="Override nfo_skip_days value in config.") parser.add_argument("-c", "--stop-counter", dest='cnt', default='', nargs='?', help="Override stop_counter value in config.") parser.add_argument("-R", "--rerun-delay", dest='delaytm', default='', nargs='?', help="Delay (eg. 1h10m30s or 60 (second)) time and rerun, until all movies proceed. 
Note: stop_counter value in config or -c must none zero.") parser.add_argument("-i", "--ignore-failed-list", action="store_true", help="Ignore failed list '{}'".format( os.path.join(os.path.abspath(conf.failed_folder()), 'failed_list.txt'))) parser.add_argument("-a", "--auto-exit", action="store_true", help="Auto exit after program complete") parser.add_argument("-g", "--debug", action="store_true", help="Turn on debug mode to generate diagnostic log for issue report.") parser.add_argument("-N", "--no-network-operation", action="store_true", help="No network query, do not get metadata, for cover cropping purposes, only takes effect when main mode is 3.") parser.add_argument("-w", "--website", dest='site', default='', nargs='?', help="Override [priority]website= in config.") parser.add_argument("-D", "--download-images", dest='dnimg', action="store_true", help="Override [common]download_only_missing_images=0 force invoke image downloading.") parser.add_argument("-C", "--config-override", dest='cfgcmd', action='append', nargs=1, help="Common use config override. Grammar: section:key=value[;[section:]key=value] eg. 'de:s=1' or 'debug_mode:switch=1' override[debug_mode]switch=1 Note:this parameters can be used multiple times") parser.add_argument("-z", "--zero-operation", dest='zero_op', action="store_true", help="""Only show job list of files and numbers, and **NO** actual operation is performed. It may help you correct wrong numbers before real job.""") parser.add_argument("-v", "--version", action="version", version=ver) parser.add_argument("-s", "--search", default='', nargs='?', help="Search number") parser.add_argument("-ss", "--specified-source", default='', nargs='?', help="specified Source.") parser.add_argument("-su", "--specified-url", default='', nargs='?', help="specified Url.") args = parser.parse_args() def set_natural_number_or_none(sk, value): if isinstance(value, str) and value.isnumeric() and int(value) >= 0: conf.set_override(f'{sk}={value}') def set_str_or_none(sk, value): if isinstance(value, str) and len(value): conf.set_override(f'{sk}={value}') def set_bool_or_none(sk, value): if isinstance(value, bool) and value: conf.set_override(f'{sk}=1') set_natural_number_or_none("common:main_mode", args.main_mode) set_natural_number_or_none("common:link_mode", args.link_mode) set_str_or_none("common:source_folder", args.path) set_bool_or_none("common:auto_exit", args.auto_exit) set_natural_number_or_none("common:nfo_skip_days", args.days) set_natural_number_or_none("advenced_sleep:stop_counter", args.cnt) set_bool_or_none("common:ignore_failed_list", args.ignore_failed_list) set_str_or_none("advenced_sleep:rerun_delay", args.delaytm) set_str_or_none("priority:website", args.site) if isinstance(args.dnimg, bool) and args.dnimg: conf.set_override("common:download_only_missing_images=0") set_bool_or_none("debug_mode:switch", args.debug) if isinstance(args.cfgcmd, list): for cmd in args.cfgcmd: conf.set_override(cmd[0]) no_net_op = False if conf.main_mode() == 3: no_net_op = args.no_network_operation if no_net_op: conf.set_override("advenced_sleep:stop_counter=0;advenced_sleep:rerun_delay=0s;face:aways_imagecut=1") return args.file, args.number, args.logdir, args.regexstr, args.zero_op, no_net_op, args.search, args.specified_source, args.specified_url class OutLogger(object): def __init__(self, logfile) -> None: self.term = sys.stdout self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def __del__(self): self.close() def __enter__(self): pass def 
__exit__(self, *args): self.close() def write(self, msg): self.term.write(msg) self.log.write(msg) def flush(self): if 'flush' in dir(self.term): self.term.flush() if 'flush' in dir(self.log): self.log.flush() if 'fileno' in dir(self.log): os.fsync(self.log.fileno()) def close(self): if self.term is not None: sys.stdout = self.term self.term = None if self.log is not None: self.log.close() self.log = None class ErrLogger(OutLogger): def __init__(self, logfile) -> None: self.term = sys.stderr self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def close(self): if self.term is not None: sys.stderr = self.term self.term = None if self.log is not None: self.log.close() self.log = None def dupe_stdout_to_logfile(logdir: str): """ Duplicates the standard output (stdout) and standard error (stderr) to log files. This function creates log files in the specified directory and redirects stdout and stderr to these files for logging purposes. :param logdir: The directory where log files will be created and saved. """ if not isinstance(logdir, str) or len(logdir) == 0: return log_dir = Path(logdir) if not log_dir.exists(): try: log_dir.mkdir(parents=True, exist_ok=True) except: pass if not log_dir.is_dir(): return # Tips for disabling logs by change directory to a same name empty regular file abslog_dir = log_dir.resolve() log_tmstr = datetime.now().strftime("%Y%m%dT%H%M%S") logfile = abslog_dir / f'mdc_{log_tmstr}.txt' errlog = abslog_dir / f'mdc_{log_tmstr}_err.txt' sys.stdout = OutLogger(logfile) sys.stderr = ErrLogger(errlog) def close_logfile(logdir: str): """ Closes the log files and restores standard output and error streams. This function is typically called at the end of the application to ensure that log files are properly closed. :param logdir: The directory where log files are saved. 
""" if not isinstance(logdir, str) or len(logdir) == 0 or not os.path.isdir(logdir): return # 日志关闭前保存日志路径 filepath = None try: filepath = sys.stdout.filepath except: pass sys.stdout.close() sys.stderr.close() log_dir = Path(logdir).resolve() if isinstance(filepath, Path): print(f"Log file '{filepath}' saved.") assert (filepath.parent.samefile(log_dir)) # 清理空文件 for f in log_dir.glob(r'*_err.txt'): if f.stat().st_size == 0: try: f.unlink(missing_ok=True) except: pass # 合并日志 只检测日志目录内的文本日志,忽略子目录。三天前的日志,按日合并为单个日志,三个月前的日志, # 按月合并为单个月志,去年及以前的月志,今年4月以后将之按年合并为年志 # 测试步骤: """ LOGDIR=/tmp/mlog mkdir -p $LOGDIR for f in {2016..2020}{01..12}{01..28};do;echo $f>$LOGDIR/mdc_${f}T235959.txt;done for f in {01..09}{01..28};do;echo 2021$f>$LOGDIR/mdc_2021${f}T235959.txt;done for f in {00..23};do;echo 20211001T$f>$LOGDIR/mdc_20211001T${f}5959.txt;done echo "$(ls -1 $LOGDIR|wc -l) files in $LOGDIR" # 1932 files in /tmp/mlog mdc -zgic1 -d0 -m3 -o $LOGDIR # python3 ./Movie_Data_Capture.py -zgic1 -o $LOGDIR ls $LOGDIR # rm -rf $LOGDIR """ today = datetime.today() # 第一步,合并到日。3天前的日志,文件名是同一天的合并为一份日志 for i in range(1): txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}T\d{6}$', f.stem, re.A)] if not txts or not len(txts): break e = [f for f in txts if '_err' in f.stem] txts.sort() tmstr_3_days_ago = (today.replace(hour=0) - timedelta(days=3)).strftime("%Y%m%dT99") deadline_day = f'mdc_{tmstr_3_days_ago}' day_merge = [f for f in txts if f.stem < deadline_day] if not day_merge or not len(day_merge): break cutday = len('T235959.txt') # cut length mdc_20201201|T235959.txt for f in day_merge: try: day_file_name = str(f)[:-cutday] + '.txt' # mdc_20201201.txt with open(day_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第二步,合并到月 for i in range(1): # 利用1次循环的break跳到第二步,避免大块if缩进或者使用goto语法 txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}$', f.stem, re.A)] if not txts or not len(txts): break txts.sort() tmstr_3_month_ago = (today.replace(day=1) - timedelta(days=3 * 30)).strftime("%Y%m32") deadline_month = f'mdc_{tmstr_3_month_ago}' month_merge = [f for f in txts if f.stem < deadline_month] if not month_merge or not len(month_merge): break tomonth = len('01.txt') # cut length mdc_202012|01.txt for f in month_merge: try: month_file_name = str(f)[:-tomonth] + '.txt' # mdc_202012.txt with open(month_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第三步,月合并到年 for i in range(1): if today.month < 4: break mons = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{6}$', f.stem, re.A)] if not mons or not len(mons): break mons.sort() deadline_year = f'mdc_{today.year - 1}13' year_merge = [f for f in mons if f.stem < deadline_year] if not year_merge or not len(year_merge): break toyear = len('12.txt') # cut length mdc_2020|12.txt for f in year_merge: try: year_file_name = str(f)[:-toyear] + '.txt' # mdc_2020.txt with open(year_file_name, 'a', encoding='utf-8') as y: y.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第四步,压缩年志 如果有压缩需求,请自行手工压缩,或者使用外部脚本来定时完成。推荐nongnu的lzip,对于 # 这种粒度的文本日志,压缩比是目前最好的。lzip -9的运行参数下,日志压缩比要高于xz -9,而且内存占用更少, # 多核利用率更高(plzip多线程版本),解压速度更快。压缩后的大小差不多是未压缩时的2.4%到3.7%左右, # 100MB的日志文件能缩小到3.7MB。 return filepath def signal_handler(*args): """ A signal handler function for handling operating system signals like Ctrl+C (SIGINT). 
It defines the behavior of the application when such signals are received, such as graceful termination. :param args: Variable argument list, used to handle signal information. """ print('[!]Ctrl+C detected, Exit.') os._exit(9) def sigdebug_handler(*args): """ A signal handler function specifically for toggling debug mode on or off. It alters the debug configuration based on certain system signals (like window size change in Unix systems). :param args: Variable argument list, used to handle signal information. """ conf = config.getInstance() conf.set_override(f"debug_mode:switch={int(not conf.debug())}") print(f"[!]Debug {('oFF', 'On')[int(conf.debug())]}") # 新增失败文件列表跳过处理,及.nfo修改天数跳过处理,提示跳过视频总数,调试模式(-g)下详细被跳过文件,跳过小广告 def movie_lists(source_folder, regexstr: str) -> typing.List[str]: """ Generates a list of movie file paths from the specified source folder. It filters files based on regular expressions and other criteria, such as file type and size. :param source_folder: The folder to scan for movie files. :param regexstr: A regular expression string to filter movie files. :return: A list of paths to the movie files that match the criteria. """ conf = config.getInstance() main_mode = conf.main_mode() debug = conf.debug() nfo_skip_days = conf.nfo_skip_days() link_mode = conf.link_mode() file_type = conf.media_type().lower().split(",") trailerRE = re.compile(r'-trailer\.', re.IGNORECASE) cliRE = None if isinstance(regexstr, str) and len(regexstr): try: cliRE = re.compile(regexstr, re.IGNORECASE) except: pass failed_list_txt_path = Path(conf.failed_folder()).resolve() / 'failed_list.txt' failed_set = set() if (main_mode == 3 or link_mode) and not conf.ignore_failed_list(): try: flist = failed_list_txt_path.read_text(encoding='utf-8').splitlines() failed_set = set(flist) if len(flist) != len(failed_set): # 检查去重并写回,但是不改变failed_list.txt内条目的先后次序,重复的只保留最后的 fset = failed_set.copy() for i in range(len(flist) - 1, -1, -1): fset.remove(flist[i]) if flist[i] in fset else flist.pop(i) failed_list_txt_path.write_text('\n'.join(flist) + '\n', encoding='utf-8') assert len(fset) == 0 and len(flist) == len(failed_set) except: pass if not Path(source_folder).is_dir(): print('[-]Source folder not found!') return [] total = [] # source = Path(source_folder).resolve() source = Path(source_folder) skip_failed_cnt, skip_nfo_days_cnt = 0, 0 escape_folder_set = set(re.split("[,,]", conf.escape_folder())) for full_name in source.glob(r'**/*'): if main_mode != 3 and set(full_name.parent.parts) & escape_folder_set: continue if not full_name.suffix.lower() in file_type: continue absf = str(full_name) if absf in failed_set: skip_failed_cnt += 1 if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # 短路布尔 符号链接不取stat(),因为符号链接可能指向不存在目标 continue # 模式不等于3下跳过软连接和未配置硬链接刮削 # 调试用0字节样本允许通过,去除小于120MB的广告'苍老师强力推荐.mp4'(102.2MB)'黑道总裁.mp4'(98.4MB)'有趣的妹子激情表演.MP4'(95MB)'有趣的臺灣妹妹直播.mp4'(15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # 同上 符号链接不取stat()及st_size,直接赋0跳过小视频检测 # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by it's .nfo 
which modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' who's .nfo modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # 软连接方式,已经成功削刮的也需要从成功目录中检查.nfo更新天数,跳过N天内更新过的 skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue number = get_number(False, f.stem) if not number: continue skip_numbers.add(number.lower()) rm_list = [] for f in total: n_number = get_number(False, os.path.basename(f)) if n_number and n_number.lower() in skip_numbers: rm_list.append(f) for f in rm_list: total.remove(f) if debug: print(f"[!]Skip file successfully processed within {nfo_skip_days} days: '{f}'") if len(rm_list): print( f"[!]Skip {len(rm_list)} movies in success folder '{success_folder}' who's .nfo modified within {nfo_skip_days} days.") return total def create_failed_folder(failed_folder: str): """ 新建failed文件夹 """ if not os.path.exists(failed_folder): try: os.makedirs(failed_folder) except: print(f"[-]Fatal error! Can not make folder '{failed_folder}'") os._exit(0) def rm_empty_folder(path): """ Recursively removes empty folders from a given path. This function is useful for cleaning up the directory structure by removing folders that no longer contain any files. :param path: The path where empty folders will be searched for and removed. """ abspath = os.path.abspath(path) deleted = set() for current_dir, subdirs, files in os.walk(abspath, topdown=False): try: still_has_subdirs = any(_ for subdir in subdirs if os.path.join(current_dir, subdir) not in deleted) if not any(files) and not still_has_subdirs and not os.path.samefile(path, current_dir): os.rmdir(current_dir) deleted.add(current_dir) print('[+]Deleting empty folder', current_dir) except: pass def create_data_and_move(movie_path: str, zero_op: bool, no_net_op: bool, oCC): """ Processes a movie file, generates necessary data, and moves the file to an appropriate directory based on the outcome. This function is central to the application's file processing logic, including scraping, organizing, and error handling. :param movie_path: Path of the movie file to be processed. :param zero_op: A boolean flag indicating whether to perform a dry run (no actual file operations). :param no_net_op: A boolean flag to indicate whether network operations are to be skipped. :param oCC: An OpenCC instance for language conversion, if required. """ # Normalized number, eg: 111xxx-222.mp4 -> xxx-222.mp4 skip_file_names = config.getInstance().skip_file_names() debug = config.getInstance().debug() n_number = get_number(debug, os.path.basename(movie_path)) movie_path = os.path.abspath(movie_path) # print(movie_path) for skip_name in skip_file_names: if skip_name in movie_path: print('[+]Skipping file:{}'.format(movie_path)) return if debug is True: print(f"[!] [{n_number}] As Number Processing for '{movie_path}'") if zero_op: return if n_number: if no_net_op: core_main_no_net_op(movie_path, n_number) else: core_main(movie_path, n_number, oCC) else: print("[-] number empty ERROR")
moveFailedFolder(movie_path)
7
2023-11-25 03:16:13+00:00
16k
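One easily missed detail in this sample's all_code: movie_lists() rewrites failed_list.txt so that duplicate entries are dropped while each path's last occurrence stays in place, using a dense conditional expression (fset.remove(flist[i]) if flist[i] in fset else flist.pop(i)). An equivalent, more explicit sketch of that "keep the last occurrence, preserve order" behaviour (the helper name is ours, not the repository's):

def dedupe_keep_last(lines):
    # Same effect as the failed_list.txt clean-up: remove duplicates,
    # keep each entry's *last* occurrence, preserve the remaining order.
    seen, kept = set(), []
    for line in reversed(lines):   # walk backwards so the last copy wins
        if line not in seen:
            seen.add(line)
            kept.append(line)
    return list(reversed(kept))

assert dedupe_keep_last(["a", "b", "a", "c", "b"]) == ["a", "c", "b"]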
abdulhaim/LMRL-Gym
llm_rl_scripts/chess/ppo/train_ppo_gpt2_offline_endgames.py
[ { "identifier": "train_loop", "path": "LLM_RL/algorithms/ppo/train.py", "snippet": "def train_loop(\n trainer: PPOTrain, \n inference: PPOInference, \n policy: PPOPolicy, \n load_dataset: Callable[[PPOInference, PPOPolicy], Union[PPODataset, PPOIterableDataset]], \n evaluator: Optional[Callable[[PPOInference, PPOPolicy], Tuple[float, Dict[str, Any]]]], \n prng_key: KeyArray, \n save_dir: Optional[str], \n n_rounds: int, \n epochs: int, \n max_steps: Optional[int], \n bsize: int, \n log_every: int, \n eval_every_steps: Optional[int], \n eval_every_epochs: Optional[int], \n eval_every_rounds: Optional[int], \n eval_at_beginning: bool, \n eval_at_end: bool, \n save_every_steps: Optional[int], \n save_every_epochs: Optional[int], \n save_every_rounds: Optional[int], \n save_at_beginning: bool, \n save_at_end: bool, \n save_best: bool, \n max_checkpoints: Optional[int], \n save_train_state: bool, \n save_dtype: jnp.dtype, \n use_wandb: bool, \n wandb_project: Optional[str], \n wandb_run_name: Optional[str], \n wandb_config: Optional[Dict[str, Any]], \n is_main_process: Optional[bool]=None, \n bc_dataset: Optional[Union[MaskDataset, MaskIterableDataset]]=None, \n bc_bsize: Optional[int]=None, \n **loop_state: Dict[Hashable, Any], \n) -> Tuple[PPOTrain, PPOInference, PPOPolicy]:\n print(\"entering training loop ...\")\n assert (not use_wandb) or (use_wandb and wandb_project is not None)\n if is_main_process is None:\n is_main_process = jax.process_index() == 0\n if bc_bsize is None:\n bc_bsize = bsize\n \n # initalize wandb\n wandb_id = loop_state.get('wandb_id', None)\n if use_wandb and is_main_process:\n if wandb_id is None:\n wandb_id = wandb.util.generate_id()\n wandb.init(\n project=wandb_project, \n id=wandb_id, \n name=wandb_run_name, \n config=wandb_config, \n reinit=True, \n resume=\"allow\", \n )\n\n # initalize training loop state\n train_logs = []\n best_perf = loop_state.get('best_perf', float('inf'))\n saved_checkpoints = loop_state.get('saved_checkpoints', deque([]))\n step = 0\n epoch = -1\n round = -1\n def _save(\n name: str, \n add_to_queue: bool, \n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal saved_checkpoints\n print(f'saving checkpoint {name} ...')\n print(f'saving in {save_dir}...')\n # conditionally delete old checkpoints\n if add_to_queue and is_main_process:\n if (max_checkpoints is not None) and (len(saved_checkpoints) >= max_checkpoints):\n delete(saved_checkpoints.popleft(), recursive=True)\n curr_save_dir = os.path.join(save_dir, name)\n if is_main_process:\n create_path(curr_save_dir)\n dump_state(\n policy_model=trainer.policy_model, \n policy_train_state=trainer.policy_train_state, \n value_head_model=trainer.value_head_model, \n value_head_train_state=trainer.value_head_train_state, \n save_dir=curr_save_dir, \n save_train_state=save_train_state, \n enable_save=is_main_process, \n save_dtype=save_dtype, \n **loop_state, \n )\n if add_to_queue and is_main_process:\n saved_checkpoints.append(curr_save_dir)\n print('saved.')\n \n def _eval(\n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal best_perf\n nonlocal inference\n nonlocal policy\n # get eval logs\n print(\"beginning evaluation ...\")\n inference = inference.replace(\n policy_params=trainer.policy_train_state.params, \n value_head_params=trainer.value_head_train_state.params, \n )\n policy.set_params(trainer.policy_train_state.params)\n eval_perf, eval_logs = evaluator(inference, policy)\n\n # publish eval logs\n eval_logs = pull_logs(label_logs(eval_logs, 'eval', {'step': step+1, 'epoch': 
epoch, 'round': round}))\n log(eval_logs, use_wandb and is_main_process)\n\n # conditionally save best model and optimizer state\n if save_dir is not None and save_best and eval_perf < best_perf:\n print('new best model!')\n best_perf = eval_perf\n _save(\n name='best', \n add_to_queue=False, \n **{**loop_state, 'best_perf': best_perf}, \n )\n\n bc_d = None\n if bc_dataset is not None:\n prng_key, new_prng = jax.random.split(prng_key)\n bc_d = dataloader(new_prng, bc_dataset, bc_bsize, truncate=True)\n \n # begin training loop\n for round in tqdm(range(n_rounds)):\n \n print(f'beginning round {round} ...')\n print(f\"best performance: {best_perf}\")\n\n # load dataset\n dataset = load_dataset(inference, policy)\n\n steps_per_epoch = len(dataset) // bsize if isinstance(dataset, Dataset) else None\n if 'steps_per_epoch' in loop_state:\n assert steps_per_epoch == loop_state['steps_per_epoch'], 'loop_state steps_per_epoch does not match dataset steps_per_epoch'\n\n # begin evaluation\n if evaluator is not None and eval_at_beginning:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save initial checkpoint\n if save_dir is not None and save_at_beginning:\n _save(\n name='initial', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n print(\"num epochs: \", epochs)\n for epoch in tqdm(range(epochs)):\n prng_key, new_prng = jax.random.split(prng_key)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n print(\"steps per epoch: \", steps_per_epoch)\n for batch in tqdm(d, total=steps_per_epoch):\n if bc_d is not None:\n try:\n bc_batch = next(bc_d)\n except StopIteration as e:\n prng_key, new_prng = jax.random.split(prng_key)\n bc_d = dataloader(new_prng, bc_dataset, bc_bsize, truncate=True)\n bc_batch = next(bc_d)\n batch = {**batch, **{'bc_data_'+k: v for k, v in bc_batch.items()}}\n \n # step model and get training logs\n if 'step' in loop_state and step < loop_state['step']:\n step += 1\n continue\n # print(\"trainer step: \", step)\n trainer, _, info = trainer.step(\n **batch, \n prng_key=new_prng, \n train=True, \n )\n train_logs.append(info)\n \n # publish training logs and clear logs\n if (step + 1) % log_every == 0:\n logs = combine_logs(train_logs)\n logs = pull_logs(label_logs(logs, 'train', {'step': step+1, 'epoch': epoch, 'round': round}))\n log(logs, use_wandb and is_main_process)\n train_logs = []\n \n # begin evaluation\n if evaluator is not None and eval_every_steps is not None and (step + 1) % eval_every_steps == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_steps is not None and (step + 1) % save_every_steps == 0:\n _save(\n name='step_%d' % (step+1), \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n step += 1\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is 
not None and eval_every_epochs is not None and (epoch + 1) % eval_every_epochs == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_epochs is not None and (epoch + 1) % save_every_epochs == 0:\n _save(\n name=f'epoch_{epoch}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_every_rounds is not None and (round + 1) % eval_every_rounds == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_rounds is not None and (round + 1) % save_every_rounds == 0:\n _save(\n name='round_%d' % (round), \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n inference = inference.replace(\n policy_params=trainer.policy_train_state.params, \n value_head_params=trainer.value_head_train_state.params, \n )\n policy.set_params(trainer.policy_train_state.params)\n \n # begin evaluation\n if evaluator is not None and eval_at_end:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save final checkpoint\n if save_dir is not None and save_at_end:\n print(\"saving final checkpoint!\")\n _save(\n name='last', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n # stop wandb\n if use_wandb and is_main_process:\n wandb.finish()\n \n inference = inference.replace(\n policy_params=trainer.policy_train_state.params, \n value_head_params=trainer.value_head_train_state.params, \n )\n policy.set_params(trainer.policy_train_state.params)\n return trainer, inference, policy" }, { "identifier": "ppo_loss_fn", "path": "LLM_RL/algorithms/ppo/base_interface.py", "snippet": "def ppo_loss_fn(\n attention_mask: jax.Array, # [batch, time-1] – output is masked; shift x[1:]\n logprobs: jax.Array, # [batch, time-1] – logprob of output produced; shift x[1:]\n values: jax.Array, # [batch, time-1] – value of current state; shift x[:-1]\n should_take_action: jax.Array, # [batch, time-1] – is output produced by action; shift x[1:]\n old_logprobs: jax.Array, # [batch, time-1] – logprob of output produced; shift x[1:]\n old_values: jax.Array, # [batch, time-1] – value of current state; shift x[:-1]\n old_advantages: jax.Array, # [batch, time-1] – advantage of output produced; shift x[1:]\n old_returns: jax.Array, # [batch, time-1] – return of current state; shift x[:-1]\n *, \n cliprange_value: Union[float, jax.Array], \n cliprange: Union[float, jax.Array], \n value_loss_coef: 
Union[float, jax.Array], \n) -> Tuple[jax.Array, Dict[str, Any]]:\n \"\"\"PPO objective function.\n References:\n - https://github.com/CarperAI/trlx/blob/main/trlx/models/modeling_ppo.py\n - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n \"\"\"\n mask = should_take_action.astype(jnp.float32) * attention_mask\n n = mask.sum()\n \n values_clipped = jnp.clip(\n values, \n old_values - cliprange_value, \n old_values + cliprange_value, \n )\n\n vf_loss1 = (values - old_returns) ** 2\n vf_loss2 = (values_clipped - old_returns) ** 2\n vf_loss = 0.5 * jnp.sum(jnp.maximum(vf_loss1, vf_loss2) * mask) / n\n vf_clipfrac = jnp.sum((vf_loss2 > vf_loss1).astype(jnp.float32) * mask) / n\n\n log_ratio = (logprobs - old_logprobs) * mask\n ratio = jnp.exp(log_ratio)\n # Unbiased KL-div estimates (`k3`). Ref: http://joschu.net/blog/kl-approx.html\n approx_kl = jnp.sum((ratio - 1) - log_ratio) / n\n\n pg_loss1 = -old_advantages * ratio\n pg_loss2 = -old_advantages * jnp.clip(\n ratio, \n 1.0 - cliprange, \n 1.0 + cliprange, \n )\n pg_loss = jnp.sum(jnp.maximum(pg_loss1, pg_loss2) * mask) / n\n pg_clipfrac = jnp.sum((pg_loss2 > pg_loss1).astype(jnp.float32) * mask) / n\n\n loss = pg_loss + value_loss_coef * vf_loss\n\n logs = dict(\n losses=dict(\n total_loss=loss, \n policy_loss=pg_loss, \n value_loss=vf_loss, \n ), \n values=dict(\n get_tensor_stats(values, mask, n), \n values_error=jnp.sum(((values - old_returns) * mask) ** 2) / n, \n clipfrac=vf_clipfrac, \n ), \n old_values=get_tensor_stats(old_values, mask, n), \n returns=get_tensor_stats(old_returns, mask, n), \n policy=dict(\n approx_kl=approx_kl, \n clipfrac=pg_clipfrac, \n ), \n ratio=(ratio * mask).sum() / n, \n padding_percentage=n / mask.size, \n )\n\n return loss, logs" }, { "identifier": "FixedKLController", "path": "LLM_RL/algorithms/ppo/base_interface.py", "snippet": "class FixedKLController:\n \"\"\"Fixed KL controller.\"\"\"\n\n def __init__(self, kl_coef):\n self.value = kl_coef\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns updated KL coefficient, βₜ₊₁.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n pass" }, { "identifier": "AdaptiveKLController", "path": "LLM_RL/algorithms/ppo/base_interface.py", "snippet": "class AdaptiveKLController:\n \"\"\"Adaptive KL Controller as described in Ziegler et al. 
\"Fine-Tuning Language Models from Human Preferences\"\n Reference: Section 2.2 https://arxiv.org/pdf/1909.08593.pdf#page=2\n Source: https://github.com/openai/lm-human-preferences/blob/master/lm_human_preferences/train_policy.py\n \"\"\"\n\n def __init__(self, init_kl_coef: float, target: float, horizon: int):\n self.value = init_kl_coef\n self.target = target\n self.horizon = horizon\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns adaptively updated KL coefficient, βₜ₊₁.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n proportional_error = np.clip(current / self.target - 1, -0.2, 0.2) # ϵₜ\n mult = 1 + proportional_error * n_steps / self.horizon\n self.value *= mult # βₜ₊₁" }, { "identifier": "GPT2ILQLPolicy", "path": "LLM_RL/algorithms/ppo/gpt2/interface.py", "snippet": "class GPT2PPOTrain(PPOTrain):\nclass PPOForwardOutputGPT2(NamedTuple):\nclass GPT2PPOInference(PPOInference):\nclass GPT2PPOPolicy(PPOPolicy):\n def load_train(\n cls, \n policy_train_state: TrainState, \n value_head_train_state: TrainState, \n policy_model: FlaxPreTrainedModel, \n value_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n bc_loss_fn: Optional[Callable]=None, \n bc_loss_weight: float=0.0, \n ) -> GPT2PPOTrain:\n def _step(\n policy_train_state: TrainState, \n value_head_train_state: TrainState, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n old_logprobs: jax.Array, \n old_values: jax.Array, \n old_advantages: jax.Array, \n old_returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n bc_data_input_ids: Optional[jax.Array], \n bc_data_input_attention_mask: Optional[jax.Array], \n bc_data_input_position_ids: Optional[jax.Array], \n bc_data_input_training_mask: Optional[jax.Array], \n train: bool=True, \n ) -> Tuple[TrainState, TrainState, jax.Array, PyTree]:\n def grad_loss(policy_params: PyTree, value_head_params: PyTree, prng_key: Optional[jax.random.PRNGKeyArray]):\n def grad_bc_loss(policy_params: PyTree, prng_key: Optional[jax.random.PRNGKeyArray]):\n def load_inference(\n cls, \n initial_policy_params: Optional[PyTree], \n policy_params: PyTree, \n value_head_params: PyTree, \n initial_policy_model: Optional[FlaxPreTrainedModel], \n policy_model: FlaxPreTrainedModel, \n value_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Optional[Callable], \n dp_shard_logits: bool=True, \n bc_loss_fn: Optional[Callable]=None, \n bc_loss_weight: float=0.0, \n ) -> GPT2PPOInference:\n def _forward(\n initial_policy_params: Optional[PyTree], \n policy_params: PyTree, \n value_head_params: PyTree, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n initial_policy_output_attentions: Optional[bool]=None, \n initial_policy_output_hidden_states: Optional[bool]=None, \n policy_output_attentions: Optional[bool]=None, # no policy_output_hidden_states option because this is required\n train: bool=False, \n ) -> PPOForwardOutputGPT2:\n def _eval_loss(\n policy_params: PyTree, \n value_head_params: PyTree, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n old_logprobs: jax.Array, \n old_values: jax.Array, \n old_advantages: jax.Array, \n old_returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n bc_data_input_ids: Optional[jax.Array], \n 
bc_data_input_attention_mask: Optional[jax.Array], \n bc_data_input_position_ids: Optional[jax.Array], \n bc_data_input_training_mask: Optional[jax.Array], \n train: bool=False, \n ) -> Tuple[jax.Array, PyTree]:\n def __init__(\n self, \n inference: GPT2Inference, \n prng_key: Optional[jax.random.KeyArray], \n generation_config: Optional[GenerationConfig]=None, \n blocking_strategy: BlockingStrategy=BlockingStrategy(padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=None), \n in_str_process: Optional[Callable[[str], str]]=None, \n out_str_process: Optional[Callable[[str], str]]=None, \n input_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n target_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n trace: bool=True, \n ):\n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n def set_params(self, policy_params: PyTree) -> None:" }, { "identifier": "load_train_state_from_config", "path": "LLM_RL/heads/linear_head.py", "snippet": "def load_train_state_from_config(\n model_config: LinearHeadConfig, \n model_dtype: Union[str, jnp.dtype], \n optim_getter: Callable[[PyTree], optax.GradientTransformation], \n mesh: Mesh, # should be shape (dp, mp)\n prng_key: jax.random.PRNGKeyArray, \n pad_to_output_dim: Optional[int]=None, \n params_dtype: Optional[Union[str, jnp.dtype]]=jnp.float32, \n) -> Tuple[TrainState, LinearHead]:\n \n model = LinearHead(model_config, dtype=model_dtype)\n model.config.mesh = mesh\n # shard params\n params = freeze(shard_params_from_config(model, prng_key, params_dtype=params_dtype))\n # pad outputs\n if pad_to_output_dim is not None:\n params = freeze(pad_outputs(unfreeze(params), model, pad_to_output_dim, dtype=params_dtype))\n # shard train_state\n train_state = shard_train_state_from_params(model, params, optim_getter(params))\n\n return train_state, model" }, { "identifier": "LinearHeadConfig", "path": "LLM_RL/heads/linear_head.py", "snippet": "class LinearHeadConfig(HeadConfig):\n def __init__(\n self, \n input_dim: int, \n output_dim: int, \n use_bias: bool=True, \n unpadded_output_dim: Optional[int]=None, \n initializer_range: Optional[int]=None, \n bias_init: Optional[float]=None, \n mesh: Optional[jax.sharding.Mesh]=None, \n ) -> None:\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.use_bias = use_bias\n self.initializer_range = initializer_range\n self.bias_init = bias_init\n self.mesh = mesh\n self.unpadded_output_dim = unpadded_output_dim\n if self.unpadded_output_dim is None:\n self.unpadded_output_dim = self.output_dim\n super().__init__()\n \n @staticmethod\n def get_partition_rules():\n return [\n (re.escape(\"['dense']['kernel']\"), PS()), \n (re.escape(\"['dense']['bias']\"), PS()), \n ]\n \n def to_dict(self) -> Dict[str, Any]:\n if self.mesh is None:\n return super().to_dict()\n else:\n new_conf = LinearHeadConfig(**self.__dict__)\n new_conf.mesh = None\n return new_conf.to_dict()" }, { "identifier": "PPODataset", "path": "LLM_RL/algorithms/ppo/data.py", "snippet": "class PPODataset(Dataset):\n def __init__(\n self, \n input_ids: np.ndarray, # [b, t]\n should_take_action: np.ndarray, # [b, t-1]\n old_logprobs: np.ndarray, # [b, t-1]\n old_values: np.ndarray, # [b, t-1]\n old_advantages: np.ndarray, # [b, t-1]\n old_returns: np.ndarray, # [b, t-1]\n ):\n assert input_ids.shape[1] == (should_take_action.shape[1]+1)\n assert input_ids.shape[1] == (old_logprobs.shape[1]+1)\n assert input_ids.shape[1] == 
(old_values.shape[1]+1)\n assert input_ids.shape[1] == (old_advantages.shape[1]+1)\n assert input_ids.shape[1] == (old_returns.shape[1]+1)\n\n assert input_ids.shape[0] == should_take_action.shape[0]\n assert input_ids.shape[0] == old_logprobs.shape[0]\n assert input_ids.shape[0] == old_values.shape[0]\n assert input_ids.shape[0] == old_advantages.shape[0]\n assert input_ids.shape[0] == old_returns.shape[0]\n\n self.input_ids = input_ids\n self.should_take_action = should_take_action\n self.old_logprobs = old_logprobs\n self.old_values = old_values\n self.old_advantages = old_advantages\n self.old_returns = old_returns\n \n def __getitem__(self, index):\n return {\n 'input_ids': jnp.asarray(self.input_ids[index], dtype=jnp.int32), \n 'should_take_action': jnp.asarray(self.should_take_action[index], dtype=jnp.bool_), \n 'old_logprobs': jnp.asarray(self.old_logprobs[index], dtype=jnp.float32), \n 'old_values': jnp.asarray(self.old_values[index], dtype=jnp.float32), \n 'old_advantages': jnp.asarray(self.old_advantages[index], dtype=jnp.float32), \n 'old_returns': jnp.asarray(self.old_returns[index], dtype=jnp.float32), \n }\n \n def __len__(self):\n return self.input_ids.shape[0]\n \n @classmethod\n def from_ppo_data_list(\n cls, \n ppo_data_list: List[PPOData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> PPODataset:\n \n data = PPOData.block(ppo_data_list, blocking_strategy, tokenizer)\n\n return cls(**data)" }, { "identifier": "chess_text_trajectory_chain_from_json", "path": "llm_rl_scripts/chess/env/data.py", "snippet": "def chess_text_trajectory_chain_from_json(data, scaling=1):\n idx = 0\n text_trajectory_chains = []\n while idx < len(data):\n trajectories = []\n done = False\n while not done and idx < len(data):\n if data[idx] == \"\":\n # print(\"here!\")\n # embed()\n idx += 1\n break\n result = json.loads(data[idx])\n state = Text(preprocess_state_og(result[\"from_state\"]), False)\n action = Text(preprocess_move(result[\"action\"]), True)\n trajectory = TextTrajectory([state, action], [0, scaling*result[\"reward\"]], result[\"done\"])\n trajectories.append(trajectory)\n done = result[\"done\"]\n idx += 1\n \n if len(trajectories) == 200:\n break\n \n if not trajectories:\n break\n chain = None\n for text_trajectory in trajectories[::-1]:\n chain = TextTrajectoryChain(\n text_trajectory=text_trajectory, \n next=chain, \n )\n # print(chain)\n text_trajectory_chains.append(chain)\n random.shuffle(text_trajectory_chains)\n return text_trajectory_chains\n # if not result[\"done\"]:\n # data.append(result) " }, { "identifier": "get_data_from_bucket", "path": "llm_rl_scripts/chess/env/data.py", "snippet": "def get_data_from_bucket(bucket_name, blob_name):\n bucket = client.get_bucket(bucket_name)\n blob = bucket.get_blob(blob_name)\n\n blob_data = blob.download_as_text()\n blob_data = blob_data.split(\"\\n\")\n return blob_data" }, { "identifier": "get_random_positions_not_in_test", "path": "llm_rl_scripts/chess/env/data.py", "snippet": "def get_random_positions_not_in_test(bucket_name=bucket_name, blob_name=blob_name, num_pos_per_setup=4):\n test_positions = get_data_from_bucket(bucket_name, blob_name)\n test_positions = test_positions[:500]\n test_positions = [position.replace(\"\\n\", \"\").replace(\"\\\"\", \"\") for position in test_positions]\n \n total_positions = []\n for setup in [\"kQK\", \"kRK\", \"kQRK\", \"kRRK\"]:\n random_positions = []\n while len(random_positions) < num_pos_per_setup:\n random_position = 
large_piece_random_endgame(setup)\n if random_position not in test_positions:\n random_positions.append(random_position)\n total_positions.extend(random_positions)\n \n return total_positions" }, { "identifier": "text_env_eval_chess_positions", "path": "llm_rl_scripts/chess/env/env.py", "snippet": "def text_env_eval_chess_positions(\n positions: List[str],\n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: Optional[TextHistory]=None, # only allow one initial_text_history here\n seed_generator: Optional[Iterator[int]]=None, \n env_options: Optional[Dict]=None, # only allow one env_options here\n interaction_callback: Optional[Callable[[List[Tuple[TextHistory, TextHistory, TextHistory, float, bool]]], None]]=None, \n bsize: int=1, \n verbose: bool=True,\n random_opponent: bool=False,\n max_moves: int=400,\n):\n interactions, rs, dones = [], [], []\n victories, percent_illegals, episode_length = [], [], []\n for position in positions:\n env = FenChessHistoryEnv(from_position=position, random_opponent=random_opponent, max_moves=max_moves)\n env_interactions = []\n for _ in tqdm(range((n_rollouts+(bsize-1))//bsize), disable=not verbose):\n actual_bsize = min(n_rollouts-len(env_interactions), bsize)\n npad = bsize - actual_bsize\n interaction_batch = interact_environment(\n env, \n policy, \n initial_text_history=initial_text_history, \n env_seed=[None]*actual_bsize if seed_generator is None else [next(seed_generator) for _ in range(actual_bsize)], \n env_options=[env_options]*actual_bsize, \n bsize=actual_bsize,\n npad=npad,\n )\n \n for interaction in interaction_batch:\n env_interactions.append(interaction)\n \n # collect some metrics about how the chess agent did\n rewards = [x.reward for x in interaction]\n victories.append(1 if 1 in rewards else 0)\n num_illegal = sum([1 if x.reward == -1 and i < len(rewards) - 1 else 0 for i, x in enumerate(interaction)])\n percent_illegal = num_illegal / len(rewards) * 100\n percent_illegals.append(percent_illegal)\n episode_length.append(len(rewards))\n \n # collect the rewards and dones\n rs.append(sum(map(lambda x: x.reward, interaction)))\n dones.append(interaction[-1].done)\n if interaction_callback is not None:\n interaction_callback(interaction)\n interactions.extend(env_interactions)\n \n rs = np.asarray(rs, dtype=np.float32)\n dones = np.asarray(dones, dtype=np.float32)\n results_summary = dict(\n reward=dict(\n mean=np.mean(rs), \n std=np.std(rs), \n min=np.min(rs), \n max=np.max(rs), \n ), \n done=dict(\n mean=np.mean(dones), \n std=np.std(dones), \n min=np.min(dones), \n max=np.max(dones), \n ), \n victories=dict(\n mean=np.mean(victories),\n std=np.std(victories),\n min=np.min(victories),\n max=np.max(victories),\n ),\n percent_illegals=dict(\n mean=np.mean(percent_illegals),\n std=np.std(percent_illegals),\n min=np.min(percent_illegals),\n max=np.max(percent_illegals),\n ),\n episode_length=dict(\n mean=np.mean(episode_length),\n std=np.std(episode_length),\n min=np.min(episode_length),\n max=np.max(episode_length),\n ),\n )\n \n return interactions, results_summary" } ]
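The ppo_loss_fn snippet above is the objective this training script later binds with partial(cliprange_value=..., cliprange=..., value_loss_coef=...). Stripped of its logging dictionary and KL diagnostics, it is the standard clipped PPO surrogate plus a clipped value loss, averaged over action tokens. A condensed sketch in jax.numpy for reference (argument names shortened; a reading aid, not the repository's API):

import jax.numpy as jnp

def clipped_ppo_loss(logp, old_logp, values, old_values, advantages,
                     returns, mask, cliprange, cliprange_value, vf_coef):
    n = mask.sum()
    # Policy term: clipped importance-weighted advantage, masked to action tokens.
    ratio = jnp.exp((logp - old_logp) * mask)
    pg = jnp.maximum(-advantages * ratio,
                     -advantages * jnp.clip(ratio, 1.0 - cliprange, 1.0 + cliprange))
    pg_loss = (pg * mask).sum() / n
    # Value term: clipped squared error against the returns.
    v_clip = jnp.clip(values, old_values - cliprange_value, old_values + cliprange_value)
    vf = jnp.maximum((values - returns) ** 2, (v_clip - returns) ** 2)
    vf_loss = 0.5 * (vf * mask).sum() / n
    return pg_loss + vf_coef * vf_loss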
from typing import Optional from JaxSeq.bucket_manager import open_with_bucket as open from transformers import AutoTokenizer from JaxSeq.utils import convert_path, load_mesh, get_dtype, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask, create_path, get_enabled_save_path from JaxSeq.models.gpt2.interface import GPT2Inference from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode from LLM_RL.algorithms.ppo.train import train_loop from LLM_RL.algorithms.ppo.base_interface import ppo_loss_fn, FixedKLController, AdaptiveKLController from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.algorithms.ppo.gpt2.interface import GPT2ILQLPolicy, GPT2ILQLInference, GPT2PPOTrain from LLM_RL.heads.linear_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.linear_head import LinearHeadConfig from JaxSeq.shard_model import shard_params_from_params from LLM_RL.algorithms.ppo.data import PPODataset from functools import partial from JaxSeq.logs import pull_logs from JaxSeq.utils import multihost_device_get from llm_rl_scripts.chess.env.data import chess_text_trajectory_chain_from_json, get_data_from_bucket, get_random_positions_not_in_test from llm_rl_scripts.chess.env.env import text_env_eval_chess_positions import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np
11394
b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config( model_config=LinearHeadConfig( input_dim=policy_model.config.n_embd, output_dim=1, use_bias=True, initializer_range=0.0, ), model_dtype=jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=head_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loss_f = partial(ppo_loss_fn, cliprange_value=cliprange_value, cliprange=cliprange, value_loss_coef=value_loss_coef) ppo_inference = GPT2ILQLInference.load_inference( initial_policy_params=initial_policy_params, policy_params=policy_train_state.params, value_head_params=value_head_train_state.params, initial_policy_model=policy_model, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, ) ppo_trainer = GPT2PPOTrain.load_train( policy_train_state=policy_train_state, value_head_train_state=value_head_train_state, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, ) if use_adaptive_kl: kl_controller = AdaptiveKLController(init_kl_coef=init_kl_coef, target=kl_target, horizon=kl_horizon) else: kl_controller = FixedKLController(kl_coef=init_kl_coef) bucket_name = "rl-llm-bench-dataset" blob_name = "endgames/train_unshuffled.jsonl" data = get_data_from_bucket(bucket_name, blob_name) text_trajectory_chains = chess_text_trajectory_chain_from_json(data) n_rounds = len(text_trajectory_chains) // 256 data_round = 0 def ppo_dataset_loader(ppo_inference:GPT2ILQLInference, policy, num_to_sample=256): nonlocal data_round # num_to_sample = len(text_trajectory_chains) // n_rounds chains_for_round = text_trajectory_chains[data_round*num_to_sample:(data_round+1)*num_to_sample] print("congrats! you are done loading data!!") ppo_data, all_kls = ppo_inference.get_ppo_data_from_text_trajectory_chain( chains_for_round, bsize=ppo_data_bsize, max_length=max_input_length+max_output_length, gamma=gamma, lam=lam, kl_weight=kl_controller.value, use_advantage_whitening=use_advantage_whitening, ) mean_kl = all_kls.mean().item() kl_controller.update(mean_kl, train_bsize) ppo_dataset = PPODataset.from_ppo_data_list( ppo_data, tokenizer, BlockingStrategy(Padding.RIGHT, Truncation.RIGHT, max_input_length+max_output_length), ) if save_dir is not None and save_ppo_dataset: print('saving ppo dataset ...') data_save_path = os.path.join(save_dir, 'data_saves', f'{data_round}') if is_main_process: create_path(data_save_path) # save ppo_dataset with open(get_enabled_save_path( os.path.join(data_save_path, 'ppo_dataset.pkl'), enabled=is_main_process, ), 'wb') as f: pkl.dump(ppo_dataset, f) # save text_trajectory_chains with open(get_enabled_save_path( os.path.join(data_save_path, 'text_trajectory_chains.pkl'), enabled=is_main_process, ), 'wb') as f: pkl.dump(text_trajectory_chains, f) # save raw_results # with open(get_enabled_save_path( # os.path.join(data_save_path, 'raw_results.pkl'), # enabled=is_main_process, # ), 'wb') as f: # pkl.dump(raw_results, f) # save summary_results # with open(get_enabled_save_path( # os.path.join(data_save_path, 'summary_results.json'), # enabled=is_main_process, # ), 'w') as f: # json.dump(summary_results, f) # print('done saving ppo dataset.') data_round += 1 return ppo_dataset def evaluator(inference, policy): bucket_name = "rl-llm-bench-dataset" blob_name = "endgames/test_positions.jsonl"
def main( model_load_mode: ModelLoadMode, model_load_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]=None, n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, num_pos_per_setup: int=1, lr: float=1e-5, weight_decay: float=0.0, train_bsize: int=32, grad_accum_steps: int=1, rollout_bsize: int=32, n_rollouts: int=16, ppo_data_bsize: int=32, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', use_fp16_activations: bool=False, use_fp16_params: bool=False, max_input_length: int=512, max_output_length: int=512, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=None, eval_every_rounds: Optional[int]=1, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=None, save_every_rounds: Optional[int]=None, save_at_beginning: bool=False, save_at_end: bool=True, save_best: bool=True, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_ppo_dataset: bool=False, save_bf16: bool=True, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, gamma: float=1.0, lam: float=0.95, use_advantage_whitening: bool=True, init_kl_coef: float=0.001, kl_target: Optional[float]=None, kl_horizon: Optional[int]=None, cliprange_value: float=0.2, cliprange: float=0.2, value_loss_coef: float=1.0, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, on_cloud_bucket: bool=True, ): input_args = locals().copy() print(input_args) use_adaptive_kl = (kl_target is not None and kl_horizon is not None) if not use_adaptive_kl: assert kl_target is None and kl_horizon is None tokenizer = AutoTokenizer.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_dtype = get_dtype(use_fp16=use_fp16_activations) params_dtype = get_dtype(use_fp16=use_fp16_params) model_prng_key = jax.random.PRNGKey(2) policy_train_state, policy_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=model_dtype, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=params_dtype, ) policy_model.config.gradient_checkpointing = gradient_checkpointing policy_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy with jax.default_device(jax.devices('cpu')[0]): initial_policy_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), policy_train_state.params, ) 
initial_policy_params = shard_params_from_params( model=policy_model, params=initial_policy_params, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f) policy_inference = GPT2Inference.load_inference( params=policy_train_state.params, model=policy_model, tokenizer=tokenizer, ) # env = FenChessEnvSingleTurn() policy_prng = jax.random.PRNGKey(0) policy = GPT2ILQLPolicy( inference=policy_inference, prng_key=policy_prng, generation_config=GenerationConfig( do_sample=policy_do_sample, num_beams=policy_num_beams, temperature=policy_temperature, top_p=policy_top_p, top_k=policy_top_k, eos_token_id=tokenizer.encode('\n')[0], pad_token_id=tokenizer.pad_token_id, max_new_tokens=max_output_length, ), blocking_strategy=BlockingStrategy( padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=max_input_length, ), out_str_process=lambda x: x.removesuffix('\n')+'\n', ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config( model_config=LinearHeadConfig( input_dim=policy_model.config.n_embd, output_dim=1, use_bias=True, initializer_range=0.0, ), model_dtype=jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=head_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loss_f = partial(ppo_loss_fn, cliprange_value=cliprange_value, cliprange=cliprange, value_loss_coef=value_loss_coef) ppo_inference = GPT2ILQLInference.load_inference( initial_policy_params=initial_policy_params, policy_params=policy_train_state.params, value_head_params=value_head_train_state.params, initial_policy_model=policy_model, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, ) ppo_trainer = GPT2PPOTrain.load_train( policy_train_state=policy_train_state, value_head_train_state=value_head_train_state, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, ) if use_adaptive_kl: kl_controller = AdaptiveKLController(init_kl_coef=init_kl_coef, target=kl_target, horizon=kl_horizon) else: kl_controller = FixedKLController(kl_coef=init_kl_coef) bucket_name = "rl-llm-bench-dataset" blob_name = "endgames/train_unshuffled.jsonl" data = get_data_from_bucket(bucket_name, blob_name) text_trajectory_chains = chess_text_trajectory_chain_from_json(data) n_rounds = len(text_trajectory_chains) // 256 data_round = 0 def ppo_dataset_loader(ppo_inference:GPT2ILQLInference, policy, num_to_sample=256): nonlocal data_round # num_to_sample = len(text_trajectory_chains) // n_rounds chains_for_round = text_trajectory_chains[data_round*num_to_sample:(data_round+1)*num_to_sample] print("congrats! 
you are done loading data!!") ppo_data, all_kls = ppo_inference.get_ppo_data_from_text_trajectory_chain( chains_for_round, bsize=ppo_data_bsize, max_length=max_input_length+max_output_length, gamma=gamma, lam=lam, kl_weight=kl_controller.value, use_advantage_whitening=use_advantage_whitening, ) mean_kl = all_kls.mean().item() kl_controller.update(mean_kl, train_bsize) ppo_dataset = PPODataset.from_ppo_data_list( ppo_data, tokenizer, BlockingStrategy(Padding.RIGHT, Truncation.RIGHT, max_input_length+max_output_length), ) if save_dir is not None and save_ppo_dataset: print('saving ppo dataset ...') data_save_path = os.path.join(save_dir, 'data_saves', f'{data_round}') if is_main_process: create_path(data_save_path) # save ppo_dataset with open(get_enabled_save_path( os.path.join(data_save_path, 'ppo_dataset.pkl'), enabled=is_main_process, ), 'wb') as f: pkl.dump(ppo_dataset, f) # save text_trajectory_chains with open(get_enabled_save_path( os.path.join(data_save_path, 'text_trajectory_chains.pkl'), enabled=is_main_process, ), 'wb') as f: pkl.dump(text_trajectory_chains, f) # save raw_results # with open(get_enabled_save_path( # os.path.join(data_save_path, 'raw_results.pkl'), # enabled=is_main_process, # ), 'wb') as f: # pkl.dump(raw_results, f) # save summary_results # with open(get_enabled_save_path( # os.path.join(data_save_path, 'summary_results.json'), # enabled=is_main_process, # ), 'w') as f: # json.dump(summary_results, f) # print('done saving ppo dataset.') data_round += 1 return ppo_dataset def evaluator(inference, policy): bucket_name = "rl-llm-bench-dataset" blob_name = "endgames/test_positions.jsonl"
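The all_code field above switches between FixedKLController and AdaptiveKLController depending on whether kl_target and kl_horizon are given, and feeds the mean batch KL back through kl_controller.update(mean_kl, train_bsize). The controllers themselves come from LLM_RL.algorithms.ppo.base_interface and are not shown in this record; the sketch below only illustrates the standard adaptive-KL scheme from Ziegler et al. (2019) that such a controller typically implements. Class names and the exact update rule here are assumptions, not the library's code.

# Minimal sketch of fixed vs. adaptive KL coefficient control; hypothetical,
# may differ from LLM_RL's FixedKLController / AdaptiveKLController.
class FixedKLControllerSketch:
    def __init__(self, kl_coef: float):
        self.value = kl_coef

    def update(self, current_kl: float, n_steps: int) -> None:
        pass  # fixed coefficient: nothing to adapt


class AdaptiveKLControllerSketch:
    def __init__(self, init_kl_coef: float, target: float, horizon: int):
        self.value = init_kl_coef   # current KL penalty coefficient
        self.target = target        # desired mean KL per update
        self.horizon = horizon      # smoothing horizon in samples

    def update(self, current_kl: float, n_steps: int) -> None:
        # Proportional controller with clipped error, as in Ziegler et al. (2019).
        error = max(min(current_kl / self.target - 1.0, 0.2), -0.2)
        self.value *= 1.0 + error * n_steps / self.horizon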
positions = get_random_positions_not_in_test(bucket_name=bucket_name, blob_name=blob_name, num_pos_per_setup=num_pos_per_setup)
10
2023-11-21 00:16:42+00:00
16k
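The record above crops the script exactly where evaluator begins, and its next_line field samples held-out positions with get_random_positions_not_in_test. Purely as an illustration of how the helpers visible in this record fit together, a hypothetical evaluator could look like the sketch below; the function name, default values, and body are assumptions, not the repository's actual continuation.

# Hypothetical evaluator sketch built only from the signatures shown in this
# record; NOT the repository's real evaluator body.
from llm_rl_scripts.chess.env.data import get_random_positions_not_in_test
from llm_rl_scripts.chess.env.env import text_env_eval_chess_positions


def evaluator_sketch(policy, num_pos_per_setup: int = 1, n_rollouts: int = 16, bsize: int = 32):
    bucket_name = "rl-llm-bench-dataset"
    blob_name = "endgames/test_positions.jsonl"
    positions = get_random_positions_not_in_test(
        bucket_name=bucket_name,
        blob_name=blob_name,
        num_pos_per_setup=num_pos_per_setup,
    )
    interactions, results_summary = text_env_eval_chess_positions(
        positions=positions,
        policy=policy,
        n_rollouts=n_rollouts,
        bsize=bsize,
    )
    # results_summary aggregates reward, done, victories, percent_illegals and
    # episode_length statistics (see the text_env_eval_chess_positions snippet).
    return results_summary["victories"]["mean"], results_summary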
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Kernerl size of conv layers.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\r\n \"\"\"Construct an ConvolutionModule object.\r\n \"\"\"\r\n super(ConvolutionModule, self).__init__()\r\n # kernerl_size should be a odd number for 'SAME' padding\r\n assert (kernel_size - 1) % 2 == 0\r\n\r\n self.pointwise_conv1 = nn.Conv1d(\r\n channels,\r\n 2 * channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.depthwise_conv = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n groups=channels,\r\n bias=bias,\r\n )\r\n self.norm = nn.BatchNorm1d(channels)\r\n self.pointwise_conv2 = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Compute convolution module.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, channels).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, channels).\r\n\r\n \"\"\"\r\n # exchange the temporal dimension and the feature dimension\r\n x = x.transpose(1, 2)\r\n\r\n # GLU mechanism\r\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\r\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\r\n\r\n # 1D Depthwise Conv\r\n x = self.depthwise_conv(x)\r\n x = self.activation(self.norm(x))\r\n\r\n x = self.pointwise_conv2(x)\r\n\r\n return x.transpose(1, 2)\r" }, { "identifier": "EncoderLayer", "path": "src/clap_module/conformer/encoder_layer.py", "snippet": "class EncoderLayer(nn.Module):\r\n \"\"\"Encoder layer module.\r\n\r\n Args:\r\n size (int): Input dimension.\r\n self_attn (torch.nn.Module): Self-attention module instance.\r\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance\r\n can be used as the argument.\r\n feed_forward (torch.nn.Module): Feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n conv_module (torch.nn.Module): Convolution module instance.\r\n `ConvlutionModule` instance can be used as the argument.\r\n dropout_rate (float): Dropout rate.\r\n normalize_before (bool): Whether to use layer_norm before the first block.\r\n concat_after (bool): Whether to concat attention layer's input and output.\r\n if True, additional linear will be applied.\r\n i.e. x -> x + linear(concat(x, att(x)))\r\n if False, no additional linear will be applied. i.e. 
x -> x + att(x)\r\n stochastic_depth_rate (float): Proability to skip this layer.\r\n During training, the layer may skip residual computation and return input\r\n as-is with given probability.\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n size,\r\n self_attn,\r\n feed_forward,\r\n feed_forward_macaron,\r\n conv_module,\r\n dropout_rate,\r\n normalize_before=True,\r\n concat_after=False,\r\n stochastic_depth_rate=0.0,\r\n ):\r\n \"\"\"Construct an EncoderLayer object.\"\"\"\r\n super(EncoderLayer, self).__init__()\r\n self.self_attn = self_attn\r\n self.feed_forward = feed_forward\r\n self.feed_forward_macaron = feed_forward_macaron\r\n self.conv_module = conv_module\r\n self.norm_ff = LayerNorm(size) # for the FNN module\r\n self.norm_mha = LayerNorm(size) # for the MHA module\r\n if feed_forward_macaron is not None:\r\n self.norm_ff_macaron = LayerNorm(size)\r\n self.ff_scale = 0.5\r\n else:\r\n self.ff_scale = 1.0\r\n if self.conv_module is not None:\r\n self.norm_conv = LayerNorm(size) # for the CNN module\r\n self.norm_final = LayerNorm(size) # for the final output of the block\r\n self.dropout = nn.Dropout(dropout_rate)\r\n self.size = size\r\n self.normalize_before = normalize_before\r\n self.concat_after = concat_after\r\n if self.concat_after:\r\n self.concat_linear = nn.Linear(size + size, size)\r\n self.stochastic_depth_rate = stochastic_depth_rate\r\n\r\n def forward(self, x_input, mask, cache=None):\r\n \"\"\"Compute encoded features.\r\n\r\n Args:\r\n x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.\r\n - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].\r\n - w/o pos emb: Tensor (#batch, time, size).\r\n mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).\r\n cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, size).\r\n torch.Tensor: Mask tensor (#batch, 1, time).\r\n\r\n \"\"\"\r\n if isinstance(x_input, tuple):\r\n x, pos_emb = x_input[0], x_input[1]\r\n else:\r\n x, pos_emb = x_input, None\r\n\r\n skip_layer = False\r\n # with stochastic depth, residual connection `x + f(x)` becomes\r\n # `x <- x + 1 / (1 - p) * f(x)` at training time.\r\n stoch_layer_coeff = 1.0\r\n if self.training and self.stochastic_depth_rate > 0:\r\n skip_layer = torch.rand(1).item() < self.stochastic_depth_rate\r\n stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)\r\n\r\n if skip_layer:\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n return x, mask\r\n\r\n # whether to use macaron style\r\n if self.feed_forward_macaron is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward_macaron(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n\r\n # convolution module\r\n \"\"\"\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n \"\"\"\r\n\r\n # multi-headed self-attention module\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n if cache is None:\r\n x_q = x\r\n else:\r\n assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)\r\n x_q = x[:, -1:, :]\r\n residual = residual[:, -1:, :]\r\n mask = None if 
mask is None else mask[:, -1:, :]\r\n\r\n if pos_emb is not None:\r\n x_att = self.self_attn(x_q, x, x, pos_emb, mask)\r\n else:\r\n x_att = self.self_attn(x_q, x, x, mask)\r\n\r\n if self.concat_after:\r\n x_concat = torch.cat((x, x_att), dim=-1)\r\n x = residual + stoch_layer_coeff * self.concat_linear(x_concat)\r\n else:\r\n x = residual + stoch_layer_coeff * self.dropout(x_att)\r\n if not self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n # convolution module\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n\r\n # feed forward module\r\n if self.feed_forward:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff(x)\r\n else:\r\n raise ValueError(\"not exit\")\r\n\r\n if self.conv_module is not None:\r\n x = self.norm_final(x)\r\n\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n\r\n return x, mask\r" }, { "identifier": "get_activation", "path": "src/clap_module/conformer/modules.py", "snippet": "def get_activation(act):\r\n \"\"\"Return activation function.\r\n \"\"\"\r\n # Lazy load to avoid unused import\r\n\r\n activation_funcs = {\r\n \"hardtanh\": torch.nn.Hardtanh,\r\n \"tanh\": torch.nn.Tanh,\r\n \"relu\": torch.nn.ReLU,\r\n \"selu\": torch.nn.SELU,\r\n \"swish\": Swish,\r\n }\r\n\r\n return activation_funcs[act]()\r" }, { "identifier": "VGG2L", "path": "src/clap_module/conformer/modules.py", "snippet": "class VGG2L(torch.nn.Module):\r\n \"\"\"VGG2L module for custom encoder.\r\n\r\n Args:\r\n idim: Input dimension.\r\n odim: Output dimension.\r\n pos_enc: Positional encoding class.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim: int, odim: int, pos_enc: torch.nn.Module = None):\r\n \"\"\"Construct a VGG2L object.\"\"\"\r\n super().__init__()\r\n\r\n self.vgg2l = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(64, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((3, 2)),\r\n torch.nn.Conv2d(64, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((2, 2)),\r\n )\r\n\r\n if pos_enc is not None:\r\n self.output = torch.nn.Sequential(\r\n torch.nn.Linear(128 * ((idim // 2) // 2), odim), pos_enc\r\n )\r\n else:\r\n self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim)\r\n\r\n def forward(\r\n self, feats: torch.Tensor, feats_mask: torch.Tensor\r\n ) -> Union[\r\n Tuple[torch.Tensor, torch.Tensor],\r\n Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\r\n ]:\r\n \"\"\"Forward VGG2L bottleneck.\r\n\r\n Args:\r\n feats: Feature sequences. (B, F, D_feats)\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_output: VGG output sequences.\r\n (B, sub(F), D_out) or ((B, sub(F), D_out), (B, sub(F), D_att))\r\n vgg_mask: Mask of VGG output sequences. 
(B, 1, sub(F))\r\n\r\n \"\"\"\r\n feats = feats.unsqueeze(1)\r\n vgg_output = self.vgg2l(feats)\r\n\r\n b, c, t, f = vgg_output.size()\r\n\r\n vgg_output = self.output(\r\n vgg_output.transpose(1, 2).contiguous().view(b, t, c * f)\r\n )\r\n\r\n if feats_mask is not None:\r\n vgg_mask = self.create_new_mask(feats_mask)\r\n else:\r\n vgg_mask = feats_mask\r\n\r\n return vgg_output, vgg_mask\r\n\r\n def create_new_mask(self, feats_mask: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Create a subsampled mask of feature sequences.\r\n\r\n Args:\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_mask: Mask of VGG2L output sequences. (B, 1, sub(F))\r\n\r\n \"\"\"\r\n vgg1_t_len = feats_mask.size(2) - (feats_mask.size(2) % 3)\r\n vgg_mask = feats_mask[:, :, :vgg1_t_len][:, :, ::3]\r\n\r\n vgg2_t_len = vgg_mask.size(2) - (vgg_mask.size(2) % 2)\r\n vgg_mask = vgg_mask[:, :, :vgg2_t_len][:, :, ::2]\r\n\r\n return vgg_mask\r" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)\r\n\r\n if self.zero_triu:\r\n ones = torch.ones((x.size(2), x.size(3)))\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, time1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "MultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class MultiHeadedAttention(nn.Module):\r\n \"\"\"Multi-Head Attention layer.\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate):\r\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n assert n_feat % n_head == 0\r\n # We assume d_v always equals d_k\r\n self.d_k = n_feat // n_head\r\n self.h = n_head\r\n self.linear_q = nn.Linear(n_feat, n_feat)\r\n self.linear_k = nn.Linear(n_feat, n_feat)\r\n self.linear_v = nn.Linear(n_feat, n_feat)\r\n self.linear_out = nn.Linear(n_feat, n_feat)\r\n self.attn = None\r\n self.dropout = nn.Dropout(p=dropout_rate)\r\n\r\n def forward_qkv(self, query, key, value):\r\n \"\"\"Transform query, key and value.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n\r\n Returns:\r\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\r\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\r\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\r\n\r\n \"\"\"\r\n n_batch = query.size(0)\r\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\r\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\r\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\r\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\r\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\r\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\r\n\r\n return q, k, v\r\n\r\n def forward_attention(self, value, scores, mask):\r\n \"\"\"Compute attention context vector.\r\n\r\n Args:\r\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\r\n scores (torch.Tensor): Attention 
score (#batch, n_head, time1, time2).\r\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Transformed value (#batch, time1, d_model)\r\n weighted by the attention score (#batch, time1, time2).\r\n\r\n \"\"\"\r\n n_batch = value.size(0)\r\n if mask is not None:\r\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\r\n min_value = torch.finfo(scores.dtype).min\r\n scores = scores.masked_fill(mask, min_value)\r\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\r\n mask, 0.0\r\n ) # (batch, head, time1, time2)\r\n else:\r\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\r\n\r\n p_attn = self.dropout(self.attn)\r\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\r\n x = (\r\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\r\n ) # (batch, time1, d_model)\r\n\r\n return self.linear_out(x) # (batch, time1, d_model)\r\n\r\n def forward(self, query, key, value, mask):\r\n \"\"\"Compute scaled dot product attention.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\r\n time1 means the length of query vector.\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)[\r\n :, :, :, : x.size(-1) // 2 + 1\r\n ] # only keep the positions from 0 to time2\r\n\r\n if self.zero_triu:\r\n 
ones = torch.ones((x.size(2), x.size(3)), device=x.device)\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor\r\n (#batch, 2*time1-1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, 2*time1-1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "LegacyRelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\r\n \"\"\"Relative positional encoding module (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(\r\n d_model=d_model,\r\n dropout_rate=dropout_rate,\r\n max_len=max_len,\r\n reverse=True,\r\n )\r\n\r\n def forward(self, x):\r\n \"\"\"Compute positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n torch.Tensor: Positional embedding tensor (1, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[:, : x.size(1)]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "PositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\r\n \"\"\"Positional encoding.\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n reverse (bool): Whether to reverse the input position. Only for\r\n the class LegacyRelPositionalEncoding. 
We remove it in the current\r\n class RelPositionalEncoding.\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(PositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.reverse = reverse\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n self._register_load_state_dict_pre_hook(_pre_hook)\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n if self.pe.size(1) >= x.size(1):\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n pe = torch.zeros(x.size(1), self.d_model)\r\n if self.reverse:\r\n position = torch.arange(\r\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\r\n ).unsqueeze(1)\r\n else:\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale + self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "RelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\r\n \"\"\"Relative positional encoding module (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(RelPositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n # self.pe contains both positive and negative parts\r\n # the length of self.pe is 2 * input_len - 1\r\n if self.pe.size(1) >= x.size(1) * 2 - 1:\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n # Suppose `i` means to the position of query vecotr and `j` means the\r\n # position of key vector. 
We use position relative positions when keys\r\n # are to the left (i>j) and negative relative positions otherwise (i<j).\r\n pe_positive = torch.zeros(x.size(1), self.d_model)\r\n pe_negative = torch.zeros(x.size(1), self.d_model)\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe_positive[:, 0::2] = torch.sin(position * div_term)\r\n pe_positive[:, 1::2] = torch.cos(position * div_term)\r\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\r\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\r\n\r\n # Reserve the order of positive indices and concat both positive and\r\n # negative indices. This is used to support the shifting trick\r\n # as in https://arxiv.org/abs/1901.02860\r\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\r\n pe_negative = pe_negative[1:].unsqueeze(0)\r\n pe = torch.cat([pe_positive, pe_negative], dim=1)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[\r\n :,\r\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\r\n ]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "ScaledPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\r\n \"\"\"Scaled positional encoding module.\r\n\r\n See Sec. 3.2 https://arxiv.org/abs/1809.08895\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\r\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\r\n\r\n def reset_parameters(self):\r\n \"\"\"Reset parameters.\"\"\"\r\n self.alpha.data = torch.tensor(1.0)\r\n\r\n def forward(self, x):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x + self.alpha * self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "LayerNorm", "path": "src/clap_module/conformer/modules.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\r\n \"\"\"Layer normalization module.\r\n\r\n Args:\r\n nout (int): Output dim size.\r\n dim (int): Dimension to be normalized.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, nout, dim=-1):\r\n \"\"\"Construct an LayerNorm object.\"\"\"\r\n super(LayerNorm, self).__init__(nout, eps=1e-12)\r\n self.dim = dim\r\n\r\n def forward(self, x):\r\n \"\"\"Apply layer normalization.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor.\r\n\r\n Returns:\r\n torch.Tensor: Normalized tensor.\r\n\r\n \"\"\"\r\n if self.dim == -1:\r\n return super(LayerNorm, self).forward(x)\r\n return (\r\n super(LayerNorm, self)\r\n .forward(x.transpose(self.dim, -1))\r\n .transpose(self.dim, -1)\r\n )\r" }, { "identifier": "Conv1dLinear", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\r\n 
\"\"\"Conv1D + Linear for Transformer block.\r\n\r\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize Conv1dLinear module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(Conv1dLinear, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x))\r" }, { "identifier": "MultiLayeredConv1d", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\r\n \"\"\"Multi-layered conv1d for Transformer block.\r\n\r\n This is a module of multi-leyered conv1d designed\r\n to replace positionwise feed-forward network\r\n in Transforner block, which is introduced in\r\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\r\n\r\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\r\n https://arxiv.org/pdf/1905.09263.pdf\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize MultiLayeredConv1d module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(MultiLayeredConv1d, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Conv1d(\r\n hidden_chans,\r\n in_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)\r" }, { "identifier": "PositionwiseFeedForward", "path": "src/clap_module/conformer/modules.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\r\n \"\"\"Positionwise feed forward layer.\r\n\r\n Args:\r\n idim (int): Input dimenstion.\r\n hidden_units (int): The number of hidden units.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\r\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.w_1 = torch.nn.Linear(idim, hidden_units)\r\n self.w_2 = torch.nn.Linear(hidden_units, idim)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Forward 
function.\"\"\"\r\n return self.w_2(self.dropout(self.activation(self.w_1(x))))\r" }, { "identifier": "repeat", "path": "src/clap_module/conformer/modules.py", "snippet": "def repeat(N, fn, layer_drop_rate=0.0):\r\n \"\"\"Repeat module N times.\r\n\r\n Args:\r\n N (int): Number of repeat time.\r\n fn (Callable): Function to generate module.\r\n layer_drop_rate (float): Probability of dropping out each fn (layer).\r\n\r\n Returns:\r\n MultiSequential: Repeated model instance.\r\n\r\n \"\"\"\r\n return MultiSequential(*[fn(n) for n in range(N)], layer_drop_rate=layer_drop_rate)\r" }, { "identifier": "Conv2dSubsampling", "path": "src/clap_module/conformer/sub_sampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\r\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\r\n\r\n Args:\r\n idim (int): Input dimension.\r\n odim (int): Output dimension.\r\n dropout_rate (float): Dropout rate.\r\n pos_enc (torch.nn.Module): Custom position encoding layer.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\r\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\r\n super(Conv2dSubsampling, self).__init__()\r\n self.conv = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(odim, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n )\r\n self.out = torch.nn.Sequential(\r\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\r\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\r\n )\r\n\r\n def forward(self, x, x_mask):\r\n \"\"\"Subsample x.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, idim).\r\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\r\n\r\n Returns:\r\n torch.Tensor: Subsampled tensor (#batch, time', odim),\r\n where time' = time // 4.\r\n torch.Tensor: Subsampled mask (#batch, 1, time'),\r\n where time' = time // 4.\r\n\r\n \"\"\"\r\n x = x.unsqueeze(1) # (b, c, t, f)\r\n x = self.conv(x)\r\n b, c, t, f = x.size()\r\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\r\n if x_mask is None:\r\n return x, None\r\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\r\n\r\n def __getitem__(self, key):\r\n \"\"\"Get item.\r\n\r\n When reset_parameters() is called, if use_scaled_pos_enc is used,\r\n return the positioning encoding.\r\n\r\n \"\"\"\r\n if key != -1:\r\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\r\n return self.out[key]\r" }, { "identifier": "AttentionPool1d", "path": "src/clap_module/feature_fusion.py", "snippet": "class AttentionPool1d(nn.Module):\r\n def __init__(\r\n self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None\r\n ):\r\n super().__init__()\r\n self.positional_embedding = nn.Parameter(\r\n torch.randn(spacial_dim + 1, embed_dim) / embed_dim\r\n # torch.randn(spacial_dim, embed_dim) / embed_dim\r\n )\r\n self.k_proj = nn.Linear(embed_dim, embed_dim)\r\n self.q_proj = nn.Linear(embed_dim, embed_dim)\r\n self.v_proj = nn.Linear(embed_dim, embed_dim)\r\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\r\n self.num_heads = num_heads\r\n\r\n def forward(self, x):\r\n # import pdb; pdb.set_trace()\r\n x = x.permute(1, 0, 2) # B*L*D -> L*B*D; NCHW -> (HW)NC\r\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\r\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\r\n x, _ = F.multi_head_attention_forward(\r\n query=x,\r\n key=x,\r\n value=x,\r\n embed_dim_to_check=x.shape[-1],\r\n num_heads=self.num_heads,\r\n 
q_proj_weight=self.q_proj.weight,\r\n k_proj_weight=self.k_proj.weight,\r\n v_proj_weight=self.v_proj.weight,\r\n in_proj_weight=None,\r\n in_proj_bias=torch.cat(\r\n [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]\r\n ),\r\n bias_k=None,\r\n bias_v=None,\r\n add_zero_attn=False,\r\n dropout_p=0,\r\n out_proj_weight=self.c_proj.weight,\r\n out_proj_bias=self.c_proj.bias,\r\n use_separate_proj_weight=True,\r\n training=self.training,\r\n need_weights=False,\r\n )\r\n\r\n return x[0] # B*D\r" }, { "identifier": "DAF", "path": "src/clap_module/feature_fusion.py", "snippet": "class DAF(nn.Module):\r\n \"\"\"直接相加 DirectAddFuse\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super(DAF, self).__init__()\r\n\r\n def forward(self, x, residual):\r\n return x + residual\r" }, { "identifier": "AFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class AFF(nn.Module):\r\n \"\"\"多特征融合 AFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(AFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported.'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xo = 2 * x * wei + 2 * residual * (1 - wei)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" }, { "identifier": "iAFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class iAFF(nn.Module):\r\n \"\"\"多特征融合 iAFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(iAFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n 
nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xi = x * wei + residual * (1 - wei)\r\n\r\n xl2 = self.local_att2(xi)\r\n xg2 = self.global_att(xi)\r\n xlg2 = xl2 + xg2\r\n wei2 = self.sigmoid(xlg2)\r\n xo = x * wei2 + residual * (1 - wei2)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" } ]
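All of the attention variants in the context list above share one I/O contract: queries, keys and values of shape (#batch, time, size) go in, a tensor of the same shape comes out, and the mask is (#batch, 1, time). A minimal shape check against the plain MultiHeadedAttention is sketched below; the import path assumes the repository is importable as the src.clap_module package from its root, which may need adjusting for a given checkout.

import torch
from src.clap_module.conformer.modules import MultiHeadedAttention  # import path is an assumption

mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0)
x = torch.randn(2, 10, 256)                    # (#batch, time, n_feat)
mask = torch.ones(2, 1, 10, dtype=torch.bool)  # (#batch, 1, time), all positions kept
out = mha(x, x, x, mask)
print(out.shape)                               # torch.Size([2, 10, 256])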
import logging
import torch
import math
from .convolution import ConvolutionModule
from .encoder_layer import EncoderLayer
from .modules import get_activation
from .modules import VGG2L
from .modules import (
    LegacyRelPositionMultiHeadedAttention,
    MultiHeadedAttention,
    RelPositionMultiHeadedAttention,
)
from .embedding import (
    LegacyRelPositionalEncoding,
    PositionalEncoding,
    RelPositionalEncoding,
    ScaledPositionalEncoding,
)
from .modules import LayerNorm
from .multi_layer_conv import (
    Conv1dLinear,
    MultiLayeredConv1d,
)
from .modules import (
    PositionwiseFeedForward,
)
from .modules import repeat
from .sub_sampling import Conv2dSubsampling
from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
14312
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) 
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d":
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
#           Northwestern Polytechnical University (Pengcheng Guo)
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)

"""Encoder definition."""


class Encoder(torch.nn.Module):
    """Conformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of encoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        ffn_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        ffn_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernel size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1.
            if not None, intermediate outputs are returned (which changes return type signature.)
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d":
self.embed = Conv2dSubsampling(
16
2023-11-25 02:38:32+00:00
16k
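Each record in this dump is one code-completion example: the context snippets, the import block, and the cropped code are the model input, and next_line is the target (gold_snippet_index appears to mark the context entry the completion depends on). A rough sketch of how a record could be turned into a prompt/target pair; the helper and the toy values are illustrative assumptions, not the dataset's own tooling:

# Sketch only: field names mirror the records above, but this helper and the
# toy values are illustrative assumptions, not the dataset's official tooling.
def build_prompt(example):
    context = "\n\n".join(item["snippet"] for item in example["context"])
    prompt = (
        "# repo: {}  file: {}\n".format(example["repo_name"], example["file_path"])
        + context + "\n\n"
        + example["import_statement"] + "\n\n"
        + example["cropped_code"]
    )
    return prompt, example["next_line"]

toy = {
    "repo_name": "example-org/example-repo",  # placeholder values, not a real record
    "file_path": "models/run.py",
    "context": [{"identifier": "helper", "path": "utils.py", "snippet": "def helper():\n    return 1"}],
    "import_statement": "from utils import helper",
    "cropped_code": "def run():\n    value = ",
    "next_line": "helper()",
}
prompt, target = build_prompt(toy)
print(len(prompt), target)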
facebookresearch/ExPLORe
train_finetuning_pixels.py
[ { "identifier": "DrQLearner", "path": "rlpd/agents/drq/drq_learner.py", "snippet": "class DrQLearner(SACLearner):\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n temp_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n num_qs: int = 2,\n num_min_qs: Optional[int] = None,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n target_entropy: Optional[float] = None,\n init_temperature: float = 1.0,\n backup_entropy: bool = True,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n bc_coeff: float = 0,\n ):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n if target_entropy is None:\n target_entropy = -action_dim / 2\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=True,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = FrozenDict(actor_def.init(actor_key, observations)[\"params\"])\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_cls = partial(Ensemble, net_cls=critic_cls, num=num_qs)\n critic_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=critic_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n critic_params = FrozenDict(\n critic_def.init(critic_key, observations, actions)[\"params\"]\n )\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n target_critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n temp_def = Temperature(init_temperature)\n temp_params = FrozenDict(temp_def.init(temp_key)[\"params\"])\n temp = TrainState.create(\n apply_fn=temp_def.apply,\n params=temp_params,\n tx=optax.adam(learning_rate=temp_lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = 
batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n temp=temp,\n target_entropy=target_entropy,\n tau=tau,\n discount=discount,\n num_qs=num_qs,\n num_min_qs=num_min_qs,\n backup_entropy=backup_entropy,\n data_augmentation_fn=data_augmentation_fn,\n bc_coeff=bc_coeff,\n )\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n new_agent = self\n\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n actor = _share_encoder(source=new_agent.critic, target=new_agent.actor)\n new_agent = new_agent.replace(actor=actor)\n\n rng, key = jax.random.split(new_agent.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n\n new_agent = new_agent.replace(rng=rng)\n\n return SACLearner.update(new_agent, batch, utd_ratio)" }, { "identifier": "PixelBCAgent", "path": "rlpd/agents/drq/bc.py", "snippet": "class PixelBCAgent(BCAgent):\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n encoder: str = \"d4pg\",\n ):\n assert encoder == \"d4pg\"\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key = jax.random.split(rng, 2)\n\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=False,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = FrozenDict(actor_def.init(actor_key, observations)[\"params\"])\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n return cls(\n rng=rng,\n actor=actor,\n )" }, { "identifier": "PixelRM", "path": "rlpd/agents/drq/rm.py", "snippet": "class PixelRM(struct.PyTreeNode):\n rng: PRNGKey\n r_net: TrainState\n m_net: TrainState\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] 
= (),\n ):\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key = jax.random.split(rng)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n net_cls = partial(StateValue, base_cls=base_cls)\n ucb_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=net_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n r_params = FrozenDict(ucb_def.init(key, observations)[\"params\"])\n r_net = TrainState.create(\n apply_fn=ucb_def.apply,\n params=r_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n m_params = FrozenDict(ucb_def.init(key, observations)[\"params\"])\n m_net = TrainState.create(\n apply_fn=ucb_def.apply,\n params=m_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n r_net=r_net,\n m_net=m_net,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n def _update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n def r_loss_fn(r_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n rs = self.r_net.apply_fn({\"params\": r_params}, batch[\"observations\"])\n\n loss = ((rs - batch[\"rewards\"]) ** 2.0).mean()\n return loss, {\"r_loss\": loss}\n\n grads, r_info = jax.grad(r_loss_fn, has_aux=True)(self.r_net.params)\n r_net = self.r_net.apply_gradients(grads=grads)\n\n def m_loss_fn(m_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n ms = self.m_net.apply_fn({\"params\": m_params}, batch[\"observations\"])\n\n loss = optax.sigmoid_binary_cross_entropy(ms, batch[\"masks\"]).mean()\n return loss, {\"m_loss\": loss}\n\n grads, m_info = jax.grad(m_loss_fn, has_aux=True)(self.m_net.params)\n m_net = self.m_net.apply_gradients(grads=grads)\n\n return self.replace(r_net=r_net, m_net=m_net), {**r_info, **m_info}\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_self = self.replace(rng=rng)\n\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_self, info = new_self._update(mini_batch)\n\n return new_self, info\n\n @jax.jit\n def get_reward(self, batch):\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n rewards = self.r_net.apply_fn(\n {\"params\": self.r_net.params}, batch[\"observations\"]\n )\n return rewards\n\n @jax.jit\n def get_mask(self, batch):\n if \"pixels\" not in 
batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n logits = self.m_net.apply_fn(\n {\"params\": self.m_net.params}, batch[\"observations\"]\n )\n return jax.nn.sigmoid(logits)" }, { "identifier": "PixelRND", "path": "rlpd/agents/drq/rnd.py", "snippet": "class PixelRND(struct.PyTreeNode):\n rng: PRNGKey\n net: TrainState\n frozen_net: TrainState\n coeff: float = struct.field(pytree_node=False)\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n coeff: float = 1.0,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n feature_dim: int = 256,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n ):\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key1, key2 = jax.random.split(rng, 3)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n rnd_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n rnd_cls = partial(StateFeature, base_cls=rnd_base_cls, feature_dim=feature_dim)\n net_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=rnd_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n params = FrozenDict(net_def.init(key1, observations)[\"params\"])\n net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n frozen_params = FrozenDict(net_def.init(key2, observations)[\"params\"])\n frozen_net = TrainState.create(\n apply_fn=net_def.apply,\n params=frozen_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n net=net,\n frozen_net=frozen_net,\n coeff=coeff,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n @jax.jit\n def update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_self = self.replace(rng=rng)\n\n def loss_fn(params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n feats = new_self.net.apply_fn({\"params\": params}, batch[\"observations\"])\n frozen_feats = new_self.frozen_net.apply_fn(\n {\"params\": new_self.frozen_net.params}, batch[\"observations\"]\n )\n\n loss = ((feats - frozen_feats) ** 2.0).mean()\n return loss, {\"rnd_loss\": loss}\n\n grads, info = jax.grad(loss_fn, has_aux=True)(new_self.net.params)\n net = new_self.net.apply_gradients(grads=grads)\n\n return new_self.replace(net=net), info\n\n @jax.jit\n 
def get_reward(self, batch):\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n feats = self.net.apply_fn({\"params\": self.net.params}, batch[\"observations\"])\n frozen_feats = self.net.apply_fn(\n {\"params\": self.frozen_net.params}, batch[\"observations\"]\n )\n return jnp.mean((feats - frozen_feats) ** 2.0, axis=-1) * self.coeff" }, { "identifier": "MemoryEfficientReplayBuffer", "path": "rlpd/data/memory_efficient_replay_buffer.py", "snippet": "class MemoryEfficientReplayBuffer(ReplayBuffer):\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n capacity: int,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n ):\n self.pixel_keys = pixel_keys\n\n observation_space = copy.deepcopy(observation_space)\n self._num_stack = None\n for pixel_key in self.pixel_keys:\n pixel_obs_space = observation_space.spaces[pixel_key]\n if self._num_stack is None:\n self._num_stack = pixel_obs_space.shape[-1]\n else:\n assert self._num_stack == pixel_obs_space.shape[-1]\n self._unstacked_dim_size = pixel_obs_space.shape[-2]\n low = pixel_obs_space.low[..., 0]\n high = pixel_obs_space.high[..., 0]\n unstacked_pixel_obs_space = Box(\n low=low, high=high, dtype=pixel_obs_space.dtype\n )\n observation_space.spaces[pixel_key] = unstacked_pixel_obs_space\n\n next_observation_space_dict = copy.deepcopy(observation_space.spaces)\n for pixel_key in self.pixel_keys:\n next_observation_space_dict.pop(pixel_key)\n next_observation_space = gym.spaces.Dict(next_observation_space_dict)\n\n self._first = True\n self._is_correct_index = np.full(capacity, False, dtype=bool)\n\n super().__init__(\n observation_space,\n action_space,\n capacity,\n next_observation_space=next_observation_space,\n )\n\n def insert(self, data_dict: DatasetDict):\n if self._insert_index == 0 and self._capacity == len(self) and not self._first:\n indxs = np.arange(len(self) - self._num_stack, len(self))\n for indx in indxs:\n element = super().sample(1, indx=indx)\n self._is_correct_index[self._insert_index] = False\n super().insert(element)\n\n data_dict = data_dict.copy()\n data_dict[\"observations\"] = data_dict[\"observations\"].copy()\n data_dict[\"next_observations\"] = data_dict[\"next_observations\"].copy()\n\n obs_pixels = {}\n next_obs_pixels = {}\n for pixel_key in self.pixel_keys:\n obs_pixels[pixel_key] = data_dict[\"observations\"].pop(pixel_key)\n next_obs_pixels[pixel_key] = data_dict[\"next_observations\"].pop(pixel_key)\n\n if self._first:\n for i in range(self._num_stack):\n for pixel_key in self.pixel_keys:\n data_dict[\"observations\"][pixel_key] = obs_pixels[pixel_key][..., i]\n\n self._is_correct_index[self._insert_index] = False\n super().insert(data_dict)\n\n for pixel_key in self.pixel_keys:\n data_dict[\"observations\"][pixel_key] = next_obs_pixels[pixel_key][..., -1]\n\n self._first = data_dict[\"dones\"]\n\n self._is_correct_index[self._insert_index] = True\n super().insert(data_dict)\n\n for i in range(self._num_stack):\n indx = (self._insert_index + i) % len(self)\n self._is_correct_index[indx] = False\n\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n pack_obs_and_next_obs: bool = False,\n ) -> frozen_dict.FrozenDict:\n \"\"\"Samples from the replay buffer.\n\n Args:\n batch_size: Minibatch size.\n keys: Keys to sample.\n indx: Take indices instead of sampling.\n pack_obs_and_next_obs: whether to pack img and next_img into one image.\n It's useful when they have overlapping frames.\n\n 
Returns:\n A frozen dictionary.\n \"\"\"\n\n if indx is None:\n if hasattr(self.np_random, \"integers\"):\n indx = self.np_random.integers(len(self), size=batch_size)\n else:\n indx = self.np_random.randint(len(self), size=batch_size)\n\n for i in range(batch_size):\n while not self._is_correct_index[indx[i]]:\n if hasattr(self.np_random, \"integers\"):\n indx[i] = self.np_random.integers(len(self))\n else:\n indx[i] = self.np_random.randint(len(self))\n else:\n pass\n\n if keys is None:\n keys = self.dataset_dict.keys()\n else:\n assert \"observations\" in keys\n\n keys = list(keys)\n keys.remove(\"observations\")\n\n batch = super().sample(batch_size, keys, indx)\n batch = batch.unfreeze()\n\n obs_keys = self.dataset_dict[\"observations\"].keys()\n obs_keys = list(obs_keys)\n for pixel_key in self.pixel_keys:\n obs_keys.remove(pixel_key)\n\n batch[\"observations\"] = {}\n for k in obs_keys:\n batch[\"observations\"][k] = _sample(\n self.dataset_dict[\"observations\"][k], indx\n )\n\n for pixel_key in self.pixel_keys:\n obs_pixels = self.dataset_dict[\"observations\"][pixel_key]\n obs_pixels = np.lib.stride_tricks.sliding_window_view(\n obs_pixels, self._num_stack + 1, axis=0\n )\n obs_pixels = obs_pixels[indx - self._num_stack]\n\n if pack_obs_and_next_obs:\n batch[\"observations\"][pixel_key] = obs_pixels\n else:\n batch[\"observations\"][pixel_key] = obs_pixels[..., :-1]\n if \"next_observations\" in keys:\n batch[\"next_observations\"][pixel_key] = obs_pixels[..., 1:]\n\n return frozen_dict.freeze(batch)" }, { "identifier": "ReplayBuffer", "path": "rlpd/data/replay_buffer.py", "snippet": "class ReplayBuffer(Dataset):\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n capacity: int,\n next_observation_space: Optional[gym.Space] = None,\n ):\n if next_observation_space is None:\n next_observation_space = observation_space\n\n observation_data = _init_replay_dict(observation_space, capacity)\n next_observation_data = _init_replay_dict(next_observation_space, capacity)\n dataset_dict = dict(\n observations=observation_data,\n next_observations=next_observation_data,\n actions=np.empty((capacity, *action_space.shape), dtype=action_space.dtype),\n rewards=np.empty((capacity,), dtype=np.float32),\n masks=np.empty((capacity,), dtype=np.float32),\n dones=np.empty((capacity,), dtype=np.float32),\n )\n\n super().__init__(dataset_dict)\n\n self._size = 0\n self._capacity = capacity\n self._insert_index = 0\n\n def __len__(self) -> int:\n return self._size\n\n def insert(self, data_dict: DatasetDict):\n _insert_recursively(self.dataset_dict, data_dict, self._insert_index)\n\n self._insert_index = (self._insert_index + 1) % self._capacity\n self._size = min(self._size + 1, self._capacity)\n\n def insert_batch(self, data_dict: DatasetDict):\n first_key = list(data_dict.keys())[0]\n batch_size = data_dict[first_key].shape[0]\n\n if self._insert_index + batch_size > self._capacity:\n self._insert_index = 0\n self._size = max(self._size, self._insert_index + batch_size)\n _insert_recursively_batch(\n self.dataset_dict, data_dict, self._insert_index, batch_size\n )\n\n def get_iterator(self, queue_size: int = 2, sample_args: dict = {}):\n # See https://flax.readthedocs.io/en/latest/_modules/flax/jax_utils.html#prefetch_to_device\n # queue_size = 2 should be ok for one GPU.\n\n queue = collections.deque()\n\n def enqueue(n):\n for _ in range(n):\n data = self.sample(**sample_args)\n queue.append(jax.device_put(data))\n\n enqueue(queue_size)\n while queue:\n yield 
queue.popleft()\n enqueue(1)" }, { "identifier": "evaluate", "path": "rlpd/evaluation.py", "snippet": "def evaluate(agent, env: gym.Env, num_episodes: int) -> Dict[str, float]:\n\n trajs = []\n cum_returns = []\n cum_lengths = []\n for i in range(num_episodes):\n observation, done = env.reset(), False\n traj = [observation]\n cum_return = 0\n cum_length = 0\n while not done:\n action = agent.eval_actions(observation)\n observation, reward, done, _ = env.step(action)\n cum_return += reward\n cum_length += 1\n traj.append(observation)\n cum_returns.append(cum_return)\n cum_lengths.append(cum_length)\n trajs.append({\"observation\": np.stack(traj, axis=0)})\n return {\"return\": np.mean(cum_returns), \"length\": np.mean(cum_lengths)}, trajs" }, { "identifier": "wrap_pixels", "path": "rlpd/wrappers/pixels.py", "snippet": "def wrap_pixels(\n env: gym.Env,\n action_repeat: int,\n image_size: int = 84,\n num_stack: Optional[int] = 3,\n camera_id: int = 0,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n) -> gym.Env:\n if action_repeat > 1:\n env = RepeatAction(env, action_repeat)\n\n env = UniversalSeed(env)\n env = gym.wrappers.RescaleAction(env, -1, 1)\n\n env = PixelObservationWrapper(\n env,\n pixels_only=True,\n render_kwargs={\n \"pixels\": {\n \"height\": image_size,\n \"width\": image_size,\n \"camera_id\": camera_id,\n }\n },\n pixel_keys=pixel_keys,\n )\n\n if num_stack is not None:\n env = FrameStack(env, num_stack=num_stack)\n\n env = gym.wrappers.ClipAction(env)\n\n return env, pixel_keys" }, { "identifier": "PixelICVF", "path": "rlpd/agents/drq/icvf.py", "snippet": "class PixelICVF(struct.PyTreeNode):\n rng: PRNGKey\n net: TrainState\n target_net: TrainState\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n feature_dim: int = 256,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] 
= (),\n **kwargs,\n ):\n print(\"Got additional kwargs: \", kwargs)\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key1, key2 = jax.random.split(rng, 3)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n rnd_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n rnd_cls = partial(ICVF, base_cls=rnd_base_cls, feature_dim=feature_dim)\n net_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=rnd_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n params = FrozenDict(net_def.init(key1, observations)[\"params\"])\n net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n target_net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n net=net,\n target_net=target_net,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n def _update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n def loss_fn(params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n def get_v(params, s, g, z):\n phi = self.net.apply_fn({\"params\": params}, s)[\"phi\"]\n psi = self.net.apply_fn({\"params\": params}, g)[\"psi\"]\n T = self.net.apply_fn({\"params\": params}, z)[\"T\"]\n phi_T = apply_layernorm(phi * T)\n psi_T = apply_layernorm(psi * T)\n return -1 * optax.safe_norm(phi_T - psi_T, 1e-3, axis=-1)\n\n V = get_v(\n params, batch[\"observations\"], batch[\"goals\"], batch[\"desired_goals\"]\n )\n nV = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"goals\"],\n batch[\"desired_goals\"],\n )\n target_V = batch[\"rewards\"] + 0.99 * batch[\"masks\"] * nV\n\n V_z = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"desired_goals\"],\n batch[\"desired_goals\"],\n )\n nV_z = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"desired_goals\"],\n batch[\"desired_goals\"],\n )\n adv = batch[\"desired_rewards\"] + 0.99 * batch[\"desired_masks\"] * nV_z - V_z\n\n def expectile_fn(adv, loss, expectile):\n weight = jnp.where(adv >= 0, expectile, 1 - expectile)\n return weight * loss\n\n def masked_mean(x, mask):\n mask = (mask > 0).astype(jnp.float32)\n return jnp.sum(x * mask) / (1e-5 + jnp.sum(mask))\n\n loss = expectile_fn(adv, jnp.square(V - target_V), 0.9).mean()\n return loss, {\n \"icvf_loss\": loss,\n \"V_success\": masked_mean(V, 1.0 - batch[\"masks\"]),\n \"V_failure\": masked_mean(V, batch[\"masks\"]),\n }\n\n grads, info = jax.grad(loss_fn, has_aux=True)(self.net.params)\n net = self.net.apply_gradients(grads=grads)\n target_params = optax.incremental_update(\n self.net.params, self.target_net.params, 0.005\n )\n target_net = self.target_net.replace(params=target_params)\n return self.replace(net=net, target_net=target_net), info\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n # if \"pixels\" not in 
batch[\"next_observations\"]:\n # batch = _unpack(batch)\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n goals = self.data_augmentation_fn(key, batch[\"goals\"])\n desired_goals = self.data_augmentation_fn(key, batch[\"desired_goals\"])\n\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n \"goals\": goals,\n \"desired_goals\": desired_goals,\n }\n )\n new_self = self.replace(rng=rng)\n\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_self, info = new_self._update(mini_batch)\n\n return new_self, info" }, { "identifier": "gc_dataset", "path": "rlpd/gc_dataset.py", "snippet": "class GCDataset:\nclass GCSDataset(GCDataset):\n def get_default_config():\n def __post_init__(self):\n def sample_goals(self, indx, p_randomgoal=None, p_trajgoal=None, p_currgoal=None):\n def sample(self, batch_size: int, indx=None):\n def get_default_config():\n def sample(self, batch_size: int, indx=None):" }, { "identifier": "Dataset", "path": "rlpd/data/dataset.py", "snippet": "class Dataset(object):\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n self.dataset_dict = dataset_dict\n self.dataset_len = _check_lengths(dataset_dict)\n\n # Seeding similar to OpenAI Gym:\n # https://github.com/openai/gym/blob/master/gym/spaces/space.py#L46\n self._np_random = None\n self._seed = None\n if seed is not None:\n self.seed(seed)\n\n @property\n def np_random(self) -> np.random.RandomState:\n if self._np_random is None:\n self.seed()\n return self._np_random\n\n def seed(self, seed: Optional[int] = None) -> list:\n self._np_random, self._seed = seeding.np_random(seed)\n return [self._seed]\n\n def __len__(self) -> int:\n return self.dataset_len\n\n def get_iter(self, batch_size):\n for i in range(len(self) // batch_size):\n indx = np.arange(i * batch_size, (i + 1) * batch_size)\n indx = np.clip(indx, a_min=0, a_max=len(self) - 1)\n batch = dict()\n keys = self.dataset_dict.keys()\n\n for k in keys:\n if isinstance(self.dataset_dict[k], dict):\n batch[k] = _sample(self.dataset_dict[k], indx)\n else:\n batch[k] = self.dataset_dict[k][indx]\n\n yield frozen_dict.freeze(batch)\n\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n ) -> frozen_dict.FrozenDict:\n if indx is None:\n if hasattr(self.np_random, \"integers\"):\n indx = self.np_random.integers(len(self), size=batch_size)\n else:\n indx = self.np_random.randint(len(self), size=batch_size)\n\n batch = dict()\n\n if keys is None:\n keys = self.dataset_dict.keys()\n\n for k in keys:\n if isinstance(self.dataset_dict[k], dict):\n batch[k] = _sample(self.dataset_dict[k], indx)\n else:\n batch[k] = self.dataset_dict[k][indx]\n\n return frozen_dict.freeze(batch)\n\n def sample_jax(self, batch_size: int, keys: Optional[Iterable[str]] = None):\n if not hasattr(self, \"rng\"):\n self.rng = jax.random.PRNGKey(self._seed or 42)\n\n if keys is None:\n keys = self.dataset_dict.keys()\n\n jax_dataset_dict = {k: self.dataset_dict[k] for k in keys}\n jax_dataset_dict = jax.device_put(jax_dataset_dict)\n\n @jax.jit\n def _sample_jax(rng):\n key, rng = 
jax.random.split(rng)\n indx = jax.random.randint(\n key, (batch_size,), minval=0, maxval=len(self)\n )\n return rng, jax.tree_map(\n lambda d: jnp.take(d, indx, axis=0), jax_dataset_dict\n )\n\n self._sample_jax = _sample_jax\n\n self.rng, sample = self._sample_jax(self.rng)\n return sample\n\n def split(self, ratio: float) -> Tuple[\"Dataset\", \"Dataset\"]:\n assert 0 < ratio and ratio < 1\n train_index = np.index_exp[: int(self.dataset_len * ratio)]\n test_index = np.index_exp[int(self.dataset_len * ratio) :]\n\n index = np.arange(len(self), dtype=np.int32)\n self.np_random.shuffle(index)\n train_index = index[: int(self.dataset_len * ratio)]\n test_index = index[int(self.dataset_len * ratio) :]\n\n train_dataset_dict = _subselect(self.dataset_dict, train_index)\n test_dataset_dict = _subselect(self.dataset_dict, test_index)\n return Dataset(train_dataset_dict), Dataset(test_dataset_dict)\n\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n episode_starts = [0]\n episode_ends = []\n\n episode_return = 0\n episode_returns = []\n\n for i in range(len(self)):\n episode_return += self.dataset_dict[\"rewards\"][i]\n\n if self.dataset_dict[\"dones\"][i]:\n episode_returns.append(episode_return)\n episode_ends.append(i + 1)\n if i + 1 < len(self):\n episode_starts.append(i + 1)\n episode_return = 0.0\n\n return episode_starts, episode_ends, episode_returns\n\n def filter_by_fn(self, fn):\n bool_indx = np.full((len(self),), False, dtype=bool)\n for i in range(len(self)):\n tran = {k: v[i] for k, v in self.dataset_dict.items()}\n bool_indx[i] = fn(tran)\n\n self.dataset_dict = _subselect(self.dataset_dict, bool_indx)\n self.dataset_len = _check_lengths(self.dataset_dict)\n\n def filter(\n self, take_top: Optional[float] = None, threshold: Optional[float] = None\n ):\n assert (take_top is None and threshold is not None) or (\n take_top is not None and threshold is None\n )\n\n (\n episode_starts,\n episode_ends,\n episode_returns,\n ) = self._trajectory_boundaries_and_returns()\n\n if take_top is not None:\n threshold = np.percentile(episode_returns, 100 - take_top)\n\n bool_indx = np.full((len(self),), False, dtype=bool)\n\n for i in range(len(episode_returns)):\n if episode_returns[i] >= threshold:\n bool_indx[episode_starts[i] : episode_ends[i]] = True\n\n self.dataset_dict = _subselect(self.dataset_dict, bool_indx)\n\n self.dataset_len = _check_lengths(self.dataset_dict)\n\n def normalize_returns(self, scaling: float = 1000):\n (_, _, episode_returns) = self._trajectory_boundaries_and_returns()\n self.dataset_dict[\"rewards\"] /= np.max(episode_returns) - np.min(\n episode_returns\n )\n self.dataset_dict[\"rewards\"] *= scaling" }, { "identifier": "COGDataset", "path": "rlpd/data/cog_datasets.py", "snippet": "class COGDataset(MemoryEfficientReplayBuffer):\n def __init__(\n self,\n env: gym.Env,\n dataset_path: str,\n capacity: int = 500_000,\n subsample_ratio: float = 1.0,\n pixel_keys: tuple = (\"pixels\",),\n np_rng = None,\n load_successes: bool = True,\n ):\n self.np_rng = np_rng\n super().__init__(\n env.observation_space,\n env.action_space,\n capacity=capacity,\n pixel_keys=pixel_keys\n )\n self.successful_offline_prior_trajs = []\n self.successful_offline_task_trajs = []\n \n self._load_data_from_dir(dataset_path, subsample_ratio)\n \n self.load_successes = load_successes\n if self.load_successes:\n self._load_successful_traj(dataset_path)\n\n def load_successful_traj(self):\n assert self.load_successes, \"did not load successful trajectories upon making 
this dataset\"\n prior_idx = self.np_rng.integers(len(self.successful_offline_prior_trajs))\n task_idx = self.np_rng.integers(len(self.successful_offline_task_trajs))\n prior_traj = self.successful_offline_prior_trajs[prior_idx]\n task_traj = self.successful_offline_task_trajs[task_idx]\n return prior_traj + task_traj\n \n def _load_data_from_dir(self, dataset_path, subsample_ratio=1.0):\n print(\"subsample ratio:\", subsample_ratio * subsample_ratio) # sub-sampled twice\n for f in os.listdir(dataset_path):\n full_path = os.path.join(dataset_path, f)\n if f.endswith('.npy'):\n print(\"*\"*20, \"\\nloading data from:\", full_path)\n data = np.load(full_path, allow_pickle=True)\n print(\"prior subsampling # trajs:\", len(data))\n data = self._subsample_data(data, subsample_ratio)\n self._load_data(data, subsample_ratio)\n print(\"post subsampling # trajs:\", len(self))\n \n def _subsample_data(self, data, r=1.0):\n assert 0 <= r <= 1\n n = len(data)\n idxs = self.np_rng.choice(n, size=int(n*r), replace=False)\n return data[idxs]\n\n def _load_data(self, data, subsample_ratio=1.0):\n cutoff = int(len(data) * subsample_ratio)\n for i, traj in enumerate(data):\n if i > cutoff:\n break\n trans = dict_to_list(traj)\n for tran in trans:\n data_dict = self._make_data_dict(tran)\n self.insert(data_dict)\n \n def _load_successful_traj(self, dataset_path):\n # load successful offline trajectories for visualizations / evaluation\n prior_data = np.load(os.path.join(dataset_path, 'successful', 'prior_success.npy'), allow_pickle=True)\n task_data = np.load(os.path.join(dataset_path, 'successful', 'task_success.npy'), allow_pickle=True)\n\n for traj in prior_data:\n trans = dict_to_list(traj)\n trans = [self._make_data_dict(tran) for tran in trans]\n self.successful_offline_prior_trajs.append(trans)\n\n for traj in task_data:\n trans = dict_to_list(traj)\n trans = [self._make_data_dict(tran) for tran in trans]\n self.successful_offline_task_trajs.append(trans)\n\n def _make_data_dict(self, tran):\n return dict(\n observations={\"pixels\": np.array(tran[\"observations\"][\"image\"])[..., None]},\n actions=np.array(tran[\"actions\"]),\n next_observations={\"pixels\": np.array(tran[\"next_observations\"][\"image\"])[..., None]},\n rewards=np.array(tran[\"rewards\"]),\n masks=1-np.array(tran[\"terminals\"], dtype=float),\n dones=np.array(tran[\"agent_infos\"][\"done\"])\n )" } ]
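One detail worth noting in the MemoryEfficientReplayBuffer snippet above: pixel frames are stored unstacked, and sample() rebuilds stacked observations with numpy's sliding_window_view over the time axis, so observations and next observations share frames instead of being stored twice. A tiny self-contained illustration of that windowing trick (shapes are made up; this is not the buffer class itself):

import numpy as np

# Illustration only: rebuild stacked observations from unstacked frames with a
# sliding window, mirroring the idea used in the buffer's sample() above.
T, H, W = 10, 4, 4        # number of stored frames and a tiny image size (made up)
num_stack = 3
frames = np.arange(T, dtype=np.float32)[:, None, None] * np.ones((T, H, W), dtype=np.float32)

# Each window holds num_stack + 1 consecutive frames: the first num_stack form the
# observation, the last num_stack form the next observation.
windows = np.lib.stride_tricks.sliding_window_view(frames, num_stack + 1, axis=0)
idx = np.array([5, 7])    # sampled absolute time indices
obs = windows[idx - num_stack][..., :-1]
next_obs = windows[idx - num_stack][..., 1:]
print(windows.shape, obs.shape, next_obs.shape)  # (7, 4, 4, 4) (2, 4, 4, 3) (2, 4, 4, 3)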
import os import numpy as np import tqdm import wandb import matplotlib.pyplot as plt import pickle import roboverse import types import jax import jax.numpy as jnp from absl import app, flags from flax.core import FrozenDict from ml_collections import config_flags from flax.core import frozen_dict from flax.training import checkpoints from rlpd.agents import DrQLearner, PixelRND, PixelRM, PixelBCAgent from rlpd.data import MemoryEfficientReplayBuffer, ReplayBuffer from rlpd.evaluation import evaluate from rlpd.wrappers import wrap_pixels from rlpd.agents.drq.icvf import PixelICVF from rlpd import gc_dataset from gym.wrappers import TimeLimit, FilterObservation, RecordEpisodeStatistics from rlpd.data import Dataset from rlpd.data.cog_datasets import COGDataset from functools import partial
13,592
) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed) ds = COGDataset( env=env, dataset_path=dataset_path, capacity=300000, subsample_ratio=FLAGS.dataset_subsample_ratio, np_rng=np_rng, ) ds.seed(FLAGS.seed) ds_minr = ds.dataset_dict["rewards"][: len(ds)].min() assert -10 < ds_minr < 10, "maybe sampling reward outside of buffer range" ds_iterator = ds.get_iterator( sample_args={ "batch_size": int(FLAGS.batch_size * FLAGS.utd_ratio * FLAGS.offline_ratio), "pack_obs_and_next_obs": True, } ) replay_buffer = MemoryEfficientReplayBuffer( env.observation_space, env.action_space, FLAGS.max_steps ) replay_buffer_iterator = replay_buffer.get_iterator( sample_args={ "batch_size": int( FLAGS.batch_size * FLAGS.utd_ratio * (1 - FLAGS.offline_ratio) ), "pack_obs_and_next_obs": True, } ) replay_buffer.seed(FLAGS.seed) ########### MODELS ########### # Crashes on some setups if agent is created before replay buffer. kwargs = dict(FLAGS.config) model_cls = kwargs.pop("model_cls") agent = globals()[model_cls].create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) if FLAGS.offline_relabel_type != "gt": kwargs = dict(FLAGS.rm_config) model_cls = kwargs.pop("model_cls") rm = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rm = None if FLAGS.use_rnd_offline or FLAGS.use_rnd_online: kwargs = dict(FLAGS.rnd_config) model_cls = kwargs.pop("model_cls") rnd = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rnd = None # Pre-training record_step = 0 # ICVF training and initialize RM and RND with ICVF encoder if FLAGS.use_icvf: # assert rm is not None or rnd is not None, "ICVF is not needed in this configuration" icvf = PixelICVF.create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **dict(FLAGS.config), )
""" Modified from https://github.com/ikostrikov/rlpd/blob/main/rlpd/train_finetuning_pixels.py Original lincense information: MIT License Copyright (c) 2022 Ilya Kostrikov, Philip J. Ball, Laura Smith Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ #! /usr/bin/env python ### cog imports ### ### cog imports ### FLAGS = flags.FLAGS flags.DEFINE_string("project_name", "explore-cog", "wandb project name.") flags.DEFINE_string("env_name", "cheetah-run-v0", "Environment name.") flags.DEFINE_float( "dataset_subsample_ratio", 0.1, "Ratio of the dataset to subsample (done twice)" ) flags.DEFINE_bool("use_icvf", False, "Whether to use the icvf encoder") flags.DEFINE_float("offline_ratio", 0.5, "Offline ratio.") flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_integer("eval_episodes", 100, "Number of episodes used for evaluation.") flags.DEFINE_integer("log_interval", 1000, "Logging interval.") flags.DEFINE_integer("eval_interval", 5000, "Eval interval.") flags.DEFINE_integer("batch_size", 256, "Mini batch size.") flags.DEFINE_integer("max_steps", 500000, "Number of training steps.") flags.DEFINE_integer( "start_training", 5000, "Number of training steps to start training." ) flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.") flags.DEFINE_string("save_dir", "exp_data_cog", "Directory to save checkpoints.") flags.DEFINE_bool("checkpoint_model", False, "save model") flags.DEFINE_bool("checkpoint_buffer", False, "save replay buffer") flags.DEFINE_integer("utd_ratio", 1, "Update to data ratio.") flags.DEFINE_float("bc_pretrain_rollin", 0.0, "rollin coeff") flags.DEFINE_integer( "bc_pretrain_steps", 10000, "Pre-train BC policy for a number of steps on pure offline data", ) config_flags.DEFINE_config_file( "config", "configs/rlpd_pixels_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rm_config", "configs/pixel_rm_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rnd_config", "configs/pixel_rnd_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "bc_config", "configs/pixel_bc_config.py", "File path to the training hyperparameter configuration", lock_config=False, ) flags.DEFINE_string( "offline_relabel_type", "gt", "Whether to use reward from the offline dataset. 
[gt/pred/min]", ) flags.DEFINE_boolean("use_rnd_offline", False, "Whether to use rnd offline.") flags.DEFINE_boolean("use_rnd_online", False, "Whether to use rnd online.") def combine(one_dict, other_dict): combined = {} for k, v in one_dict.items(): if isinstance(v, FrozenDict) or isinstance(v, dict): if len(v) == 0: combined[k] = v else: combined[k] = combine(v, other_dict[k]) else: tmp = np.empty( (v.shape[0] + other_dict[k].shape[0], *v.shape[1:]), dtype=v.dtype ) tmp[0::2] = v tmp[1::2] = other_dict[k] combined[k] = tmp return FrozenDict(combined) def add_prefix(prefix, dict): return {prefix + k: v for k, v in dict.items()} def main(_): wandb.init(project=FLAGS.project_name, mode="online") wandb.config.update(FLAGS) if FLAGS.save_dir is not None: log_dir = os.path.join( FLAGS.save_dir, f"{FLAGS.env_name}-s{FLAGS.seed}-icvf_{FLAGS.use_icvf}-ours_{FLAGS.use_rnd_offline}", ) print("logging to", log_dir) if FLAGS.checkpoint_model: chkpt_dir = os.path.join(log_dir, "checkpoints") os.makedirs(chkpt_dir, exist_ok=True) if FLAGS.checkpoint_buffer: buffer_dir = os.path.join(log_dir, "buffers") os.makedirs(buffer_dir, exist_ok=True) def wrap(env): return wrap_pixels( env, action_repeat=1, num_stack=1, camera_id=0, ) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed) ds = COGDataset( env=env, dataset_path=dataset_path, capacity=300000, subsample_ratio=FLAGS.dataset_subsample_ratio, np_rng=np_rng, ) ds.seed(FLAGS.seed) ds_minr = ds.dataset_dict["rewards"][: len(ds)].min() assert -10 < ds_minr < 10, "maybe sampling reward outside of buffer range" ds_iterator = ds.get_iterator( sample_args={ "batch_size": int(FLAGS.batch_size * FLAGS.utd_ratio * FLAGS.offline_ratio), "pack_obs_and_next_obs": True, } ) replay_buffer = MemoryEfficientReplayBuffer( env.observation_space, env.action_space, FLAGS.max_steps ) replay_buffer_iterator = replay_buffer.get_iterator( sample_args={ "batch_size": int( FLAGS.batch_size * FLAGS.utd_ratio * (1 - FLAGS.offline_ratio) ), "pack_obs_and_next_obs": True, } ) replay_buffer.seed(FLAGS.seed) ########### MODELS ########### # Crashes on some setups if agent is created before replay buffer. 
kwargs = dict(FLAGS.config) model_cls = kwargs.pop("model_cls") agent = globals()[model_cls].create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) if FLAGS.offline_relabel_type != "gt": kwargs = dict(FLAGS.rm_config) model_cls = kwargs.pop("model_cls") rm = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rm = None if FLAGS.use_rnd_offline or FLAGS.use_rnd_online: kwargs = dict(FLAGS.rnd_config) model_cls = kwargs.pop("model_cls") rnd = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rnd = None # Pre-training record_step = 0 # ICVF training and initialize RM and RND with ICVF encoder if FLAGS.use_icvf: # assert rm is not None or rnd is not None, "ICVF is not needed in this configuration" icvf = PixelICVF.create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **dict(FLAGS.config), )
gc_ds = gc_dataset.GCSDataset(ds, **gc_dataset.GCSDataset.get_default_config())
9
2023-11-19 21:28:52+00:00
16k
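The training script above draws a fraction offline_ratio of each update batch from the offline COG dataset and the remainder from the online replay buffer, then interleaves the two with combine(). A small numpy sketch of that split-and-interleave step (toy sizes and plain dicts rather than the script's FrozenDict):

import numpy as np

# Toy illustration of the offline/online batch mixing used in the script above.
batch_size, utd_ratio, offline_ratio = 8, 2, 0.5   # values are made up
n_offline = int(batch_size * utd_ratio * offline_ratio)
n_online = batch_size * utd_ratio - n_offline

offline = {"rewards": np.zeros(n_offline), "obs": np.zeros((n_offline, 3))}
online = {"rewards": np.ones(n_online), "obs": np.ones((n_online, 3))}

def combine(one_batch, other_batch):
    # Interleave the two batches element-wise, as combine() does above.
    out = {}
    for k, v in one_batch.items():
        merged = np.empty((v.shape[0] + other_batch[k].shape[0], *v.shape[1:]), dtype=v.dtype)
        merged[0::2] = v
        merged[1::2] = other_batch[k]
        out[k] = merged
    return out

mixed = combine(offline, online)
print(mixed["rewards"])  # alternating 0. and 1.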
Luo-Z13/pointobb
PointOBB/mmdet/datasets/cocofmt_obb.py
[ { "identifier": "COCO", "path": "PointOBB/mmdet/datasets/api_wrappers/coco_api.py", "snippet": "class COCO(_COCO):\n def __init__(self, annotation_file=None):\n def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):\n def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):\n def get_img_ids(self, img_ids=[], cat_ids=[]):\n def load_anns(self, ids):\n def load_cats(self, ids):\n def load_imgs(self, ids):" }, { "identifier": "DATASETS", "path": "PointOBB/mmdet/datasets/builder.py", "snippet": "DATASETS = Registry('dataset')" }, { "identifier": "CocoDataset", "path": "PointOBB/mmdet/datasets/coco.py", "snippet": "class CocoDataset(CustomDataset):\n\n CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',\n 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',\n 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',\n 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',\n 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')\n\n def load_annotations(self, ann_file):\n \"\"\"Load annotation from COCO style annotation file.\n\n Args:\n ann_file (str): Path of annotation file.\n\n Returns:\n list[dict]: Annotation info from COCO api.\n \"\"\"\n\n self.coco = COCO(ann_file)\n # The order of returned `cat_ids` will not\n # change with the order of the CLASSES\n self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.img_ids = self.coco.get_img_ids()\n data_infos = []\n total_ann_ids = []\n for i in self.img_ids:\n info = self.coco.load_imgs([i])[0]\n info['filename'] = info['file_name']\n data_infos.append(info)\n ann_ids = self.coco.get_ann_ids(img_ids=[i])\n total_ann_ids.extend(ann_ids)\n assert len(set(total_ann_ids)) == len(\n total_ann_ids), f\"Annotation ids in '{ann_file}' are not unique!\"\n return data_infos\n\n def get_ann_info(self, idx):\n \"\"\"Get COCO annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n \"\"\"\n\n img_id = self.data_infos[idx]['id']\n ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n ann_info = self.coco.load_anns(ann_ids)\n return self._parse_ann_info(self.data_infos[idx], ann_info)\n\n def get_cat_ids(self, idx):\n \"\"\"Get COCO category ids by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n \"\"\"\n\n img_id = self.data_infos[idx]['id']\n ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n ann_info = self.coco.load_anns(ann_ids)\n return [ann['category_id'] for ann in ann_info]\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small or without ground truths.\"\"\"\n valid_inds = []\n # obtain images that contain annotation\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n # obtain images that contain annotations of the required categories\n ids_in_cat = set()\n for i, 
class_id in enumerate(self.cat_ids):\n ids_in_cat |= set(self.coco.cat_img_map[class_id])\n # merge the image id sets of the two conditions and use the merged set\n # to filter out images if self.filter_empty_gt=True\n ids_in_cat &= ids_with_ann\n\n valid_img_ids = []\n for i, img_info in enumerate(self.data_infos):\n img_id = self.img_ids[i]\n if self.filter_empty_gt and img_id not in ids_in_cat:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n valid_img_ids.append(img_id)\n self.img_ids = valid_img_ids\n return valid_inds\n\n def _parse_ann_info(self, img_info, ann_info):\n \"\"\"Parse bbox and mask annotation.\n\n Args:\n ann_info (list[dict]): Annotation info of an image.\n with_mask (bool): Whether to parse mask annotations.\n\n Returns:\n dict: A dict containing the following keys: bboxes, bboxes_ignore,\\\n labels, masks, seg_map. \"masks\" are raw annotations and not \\\n decoded into binary masks.\n \"\"\"\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))\n inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))\n if inter_w * inter_h == 0:\n continue\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n if ann['category_id'] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann['category_id']])\n gt_masks_ann.append(ann.get('segmentation', None))\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info['filename'].replace('jpg', 'png')\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=seg_map)\n\n return ann\n\n def xyxy2xywh(self, bbox):\n \"\"\"Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n evaluation.\n\n Args:\n bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n ``xyxy`` order.\n\n Returns:\n list[float]: The converted bounding boxes, in ``xywh`` order.\n \"\"\"\n\n _bbox = bbox.tolist()\n return [\n _bbox[0],\n _bbox[1],\n _bbox[2] - _bbox[0],\n _bbox[3] - _bbox[1],\n ]\n\n def _proposal2json(self, results):\n \"\"\"Convert proposal results to COCO json style.\"\"\"\n json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n bboxes = results[idx]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = 1\n json_results.append(data)\n return json_results\n\n def _det2json(self, results):\n \"\"\"Convert detection results to COCO json style.\"\"\"\n json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n result = results[idx]\n for label in range(len(result)):\n bboxes = result[label]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = 
self.cat_ids[label]\n if len(bboxes[i]) >= 6: # add by hui\n data['ann_id'] = int(bboxes[i][5])\n json_results.append(data)\n return json_results\n\n def _segm2json(self, results):\n \"\"\"Convert instance segmentation results to COCO json style.\"\"\"\n bbox_json_results = []\n segm_json_results = []\n for idx in range(len(self)):\n img_id = self.img_ids[idx]\n det, seg = results[idx]\n for label in range(len(det)):\n # bbox results\n bboxes = det[label]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(bboxes[i][4])\n data['category_id'] = self.cat_ids[label]\n bbox_json_results.append(data)\n\n # segm results\n # some detectors use different scores for bbox and mask\n if isinstance(seg, tuple):\n segms = seg[0][label]\n mask_score = seg[1][label]\n else:\n segms = seg[label]\n mask_score = [bbox[4] for bbox in bboxes]\n for i in range(bboxes.shape[0]):\n data = dict()\n data['image_id'] = img_id\n data['bbox'] = self.xyxy2xywh(bboxes[i])\n data['score'] = float(mask_score[i])\n data['category_id'] = self.cat_ids[label]\n if isinstance(segms[i]['counts'], bytes):\n segms[i]['counts'] = segms[i]['counts'].decode()\n data['segmentation'] = segms[i]\n segm_json_results.append(data)\n return bbox_json_results, segm_json_results\n\n def results2json(self, results, outfile_prefix):\n \"\"\"Dump the detection results to a COCO style json file.\n\n There are 3 types of results: proposals, bbox predictions, mask\n predictions, and they have different data types. This method will\n automatically recognize the type, and dump them to json files.\n\n Args:\n results (list[list | tuple | ndarray]): Testing results of the\n dataset.\n outfile_prefix (str): The filename prefix of the json files. 
If the\n prefix is \"somepath/xxx\", the json files will be named\n \"somepath/xxx.bbox.json\", \"somepath/xxx.segm.json\",\n \"somepath/xxx.proposal.json\".\n\n Returns:\n dict[str: str]: Possible keys are \"bbox\", \"segm\", \"proposal\", and \\\n values are corresponding filenames.\n \"\"\"\n result_files = dict()\n if isinstance(results[0], list):\n json_results = self._det2json(results)\n result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n mmcv.dump(json_results, result_files['bbox'])\n elif isinstance(results[0], tuple):\n json_results = self._segm2json(results)\n result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n result_files['segm'] = f'{outfile_prefix}.segm.json'\n mmcv.dump(json_results[0], result_files['bbox'])\n mmcv.dump(json_results[1], result_files['segm'])\n elif isinstance(results[0], np.ndarray):\n json_results = self._proposal2json(results)\n result_files['proposal'] = f'{outfile_prefix}.proposal.json'\n mmcv.dump(json_results, result_files['proposal'])\n else:\n raise TypeError('invalid type of results')\n return result_files\n\n def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):\n gt_bboxes = []\n for i in range(len(self.img_ids)):\n ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])\n ann_info = self.coco.load_anns(ann_ids)\n if len(ann_info) == 0:\n gt_bboxes.append(np.zeros((0, 4)))\n continue\n bboxes = []\n for ann in ann_info:\n if ann.get('ignore', False) or ann['iscrowd']:\n continue\n x1, y1, w, h = ann['bbox']\n bboxes.append([x1, y1, x1 + w, y1 + h])\n bboxes = np.array(bboxes, dtype=np.float32)\n if bboxes.shape[0] == 0:\n bboxes = np.zeros((0, 4))\n gt_bboxes.append(bboxes)\n\n recalls = eval_recalls(\n gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)\n ar = recalls.mean(axis=1)\n return ar\n\n def format_results(self, results, jsonfile_prefix=None, **kwargs):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing \\\n the json filepaths, tmp_dir is the temporal directory created \\\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n result_files = self.results2json(results, jsonfile_prefix)\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n classwise=False,\n proposal_nums=(100, 300, 1000),\n iou_thrs=None,\n metric_items=None):\n \"\"\"Evaluation in COCO protocol.\n\n Args:\n results (list[list | tuple]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. Options are\n 'bbox', 'segm', 'proposal', 'proposal_fast'.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. 
Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n classwise (bool): Whether to evaluating the AP for each class.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thrs (Sequence[float], optional): IoU threshold used for\n evaluating recalls/mAPs. If set to a list, the average of all\n IoUs will also be computed. If not specified, [0.50, 0.55,\n 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n Default: None.\n metric_items (list[str] | str, optional): Metric items that will\n be returned. If not specified, ``['AR@100', 'AR@300',\n 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be\n used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',\n 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when\n ``metric=='bbox' or metric=='segm'``.\n\n Returns:\n dict[str, float]: COCO style evaluation metric.\n \"\"\"\n\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n if iou_thrs is None:\n iou_thrs = np.linspace(\n .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n if metric_items is not None:\n if not isinstance(metric_items, list):\n metric_items = [metric_items]\n\n result_files, tmp_dir = self.format_results(results, jsonfile_prefix)\n\n eval_results = OrderedDict()\n cocoGt = self.coco\n for metric in metrics:\n msg = f'Evaluating {metric}...'\n if logger is None:\n msg = '\\n' + msg\n print_log(msg, logger=logger)\n\n if metric == 'proposal_fast':\n ar = self.fast_eval_recall(\n results, proposal_nums, iou_thrs, logger='silent')\n log_msg = []\n for i, num in enumerate(proposal_nums):\n eval_results[f'AR@{num}'] = ar[i]\n log_msg.append(f'\\nAR@{num}\\t{ar[i]:.4f}')\n log_msg = ''.join(log_msg)\n print_log(log_msg, logger=logger)\n continue\n\n iou_type = 'bbox' if metric == 'proposal' else metric\n if metric not in result_files:\n raise KeyError(f'{metric} is not in results')\n try:\n predictions = mmcv.load(result_files[metric])\n if iou_type == 'segm':\n # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa\n # When evaluating mask AP, if the results contain bbox,\n # cocoapi will use the box area instead of the mask area\n # for calculating the instance area. Though the overall AP\n # is not affected, this leads to different\n # small/medium/large mask AP results.\n for x in predictions:\n x.pop('bbox')\n warnings.simplefilter('once')\n warnings.warn(\n 'The key \"bbox\" is deleted for more accurate mask AP '\n 'of small/medium/large instances since v2.12.0. 
This '\n 'does not change the overall mAP calculation.',\n UserWarning)\n cocoDt = cocoGt.loadRes(predictions)\n except IndexError:\n print_log(\n 'The testing results of the whole dataset is empty.',\n logger=logger,\n level=logging.ERROR)\n break\n\n cocoEval = COCOeval(cocoGt, cocoDt, iou_type)\n cocoEval.params.catIds = self.cat_ids\n cocoEval.params.imgIds = self.img_ids\n cocoEval.params.maxDets = list(proposal_nums)\n cocoEval.params.iouThrs = iou_thrs\n # mapping of cocoEval.stats\n coco_metric_names = {\n 'mAP': 0,\n 'mAP_50': 1,\n 'mAP_75': 2,\n 'mAP_s': 3,\n 'mAP_m': 4,\n 'mAP_l': 5,\n 'AR@100': 6,\n 'AR@300': 7,\n 'AR@1000': 8,\n 'AR_s@1000': 9,\n 'AR_m@1000': 10,\n 'AR_l@1000': 11\n }\n if metric_items is not None:\n for metric_item in metric_items:\n if metric_item not in coco_metric_names:\n raise KeyError(\n f'metric item {metric_item} is not supported')\n\n if metric == 'proposal':\n cocoEval.params.useCats = 0\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n if metric_items is None:\n metric_items = [\n 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',\n 'AR_m@1000', 'AR_l@1000'\n ]\n\n for item in metric_items:\n val = float(\n f'{cocoEval.stats[coco_metric_names[item]]:.3f}')\n eval_results[item] = val\n else:\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n if classwise: # Compute per-category AP\n # Compute per-category AP\n # from https://github.com/facebookresearch/detectron2/\n precisions = cocoEval.eval['precision']\n # precision: (iou, recall, cls, area range, max dets)\n assert len(self.cat_ids) == precisions.shape[2]\n\n results_per_category = []\n for idx, catId in enumerate(self.cat_ids):\n # area range index 0: all area ranges\n # max dets index -1: typically 100 per image\n nm = self.coco.loadCats(catId)[0]\n precision = precisions[:, :, idx, 0, -1]\n precision = precision[precision > -1]\n if precision.size:\n ap = np.mean(precision)\n else:\n ap = float('nan')\n results_per_category.append(\n (f'{nm[\"name\"]}', f'{float(ap):0.3f}'))\n\n num_columns = min(6, len(results_per_category) * 2)\n results_flatten = list(\n itertools.chain(*results_per_category))\n headers = ['category', 'AP'] * (num_columns // 2)\n results_2d = itertools.zip_longest(*[\n results_flatten[i::num_columns]\n for i in range(num_columns)\n ])\n table_data = [headers]\n table_data += [result for result in results_2d]\n table = AsciiTable(table_data)\n print_log('\\n' + table.table, logger=logger)\n\n if metric_items is None:\n metric_items = [\n 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'\n ]\n\n for metric_item in metric_items:\n key = f'{metric}_{metric_item}'\n val = float(\n f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'\n )\n eval_results[key] = val\n ap = cocoEval.stats[:6]\n eval_results[f'{metric}_mAP_copypaste'] = (\n f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '\n f'{ap[4]:.3f} {ap[5]:.3f}')\n if tmp_dir is not None:\n tmp_dir.cleanup()\n return eval_results" }, { "identifier": "eval_rbbox_map", "path": "PointOBB/mmdet/datasets/utils.py", "snippet": "def eval_rbbox_map(det_results,\n annotations,\n scale_ranges=None,\n iou_thr=0.5,\n use_07_metric=True,\n dataset=None,\n logger=None,\n nproc=4):\n \"\"\"Evaluate mAP of a rotated dataset.\n\n Args:\n det_results (list[list]): [[cls1_det, cls2_det, ...], ...].\n The outer list indicates images, and the inner list indicates\n per-class detected bboxes.\n annotations (list[dict]): Ground truth annotations where each item of\n the list indicates an image. 
Keys of annotations are:\n\n - `bboxes`: numpy array of shape (n, 5)\n - `labels`: numpy array of shape (n, )\n - `bboxes_ignore` (optional): numpy array of shape (k, 5)\n - `labels_ignore` (optional): numpy array of shape (k, )\n scale_ranges (list[tuple] | None): Range of scales to be evaluated,\n in the format [(min1, max1), (min2, max2), ...]. A range of\n (32, 64) means the area range between (32**2, 64**2).\n Default: None.\n iou_thr (float): IoU threshold to be considered as matched.\n Default: 0.5.\n use_07_metric (bool): Whether to use the voc07 metric.\n dataset (list[str] | str | None): Dataset name or dataset classes,\n there are minor differences in metrics for different datasets, e.g.\n \"voc07\", \"imagenet_det\", etc. Default: None.\n logger (logging.Logger | str | None): The way to print the mAP\n summary. See `mmcv.utils.print_log()` for details. Default: None.\n nproc (int): Processes used for computing TP and FP.\n Default: 4.\n\n Returns:\n tuple: (mAP, [dict, dict, ...])\n \"\"\"\n assert len(det_results) == len(annotations)\n\n num_imgs = len(det_results)\n num_scales = len(scale_ranges) if scale_ranges is not None else 1\n num_classes = len(det_results[0]) # positive class num\n area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]\n if scale_ranges is not None else None)\n\n pool = get_context('spawn').Pool(nproc)\n eval_results = []\n for i in range(num_classes):\n # get gt and det bboxes of this class\n cls_dets, cls_gts, cls_gts_ignore = get_cls_results(\n det_results, annotations, i)\n\n # compute tp and fp for each image with multiple processes\n tpfp = pool.starmap(\n tpfp_default,\n zip(cls_dets, cls_gts, cls_gts_ignore,\n [iou_thr for _ in range(num_imgs)],\n [area_ranges for _ in range(num_imgs)]))\n tp, fp = tuple(zip(*tpfp))\n # calculate gt number of each scale\n # ignored gts or gts beyond the specific scale are not counted\n num_gts = np.zeros(num_scales, dtype=int)\n for _, bbox in enumerate(cls_gts):\n if area_ranges is None:\n num_gts[0] += bbox.shape[0]\n else:\n gt_areas = bbox[:, 2] * bbox[:, 3]\n for k, (min_area, max_area) in enumerate(area_ranges):\n num_gts[k] += np.sum((gt_areas >= min_area)\n & (gt_areas < max_area))\n # sort all det bboxes by score, also sort tp and fp\n cls_dets = np.vstack(cls_dets)\n num_dets = cls_dets.shape[0]\n sort_inds = np.argsort(-cls_dets[:, -1])\n tp = np.hstack(tp)[:, sort_inds]\n fp = np.hstack(fp)[:, sort_inds]\n # calculate recall and precision with tp and fp\n tp = np.cumsum(tp, axis=1)\n fp = np.cumsum(fp, axis=1)\n eps = np.finfo(np.float32).eps\n recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)\n precisions = tp / np.maximum((tp + fp), eps)\n # calculate AP\n if scale_ranges is None:\n recalls = recalls[0, :]\n precisions = precisions[0, :]\n num_gts = num_gts.item()\n mode = 'area' if not use_07_metric else '11points'\n ap = average_precision(recalls, precisions, mode)\n eval_results.append({\n 'num_gts': num_gts,\n 'num_dets': num_dets,\n 'recall': recalls,\n 'precision': precisions,\n 'ap': ap\n })\n pool.close()\n if scale_ranges is not None:\n # shape (num_classes, num_scales)\n all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])\n all_num_gts = np.vstack(\n [cls_result['num_gts'] for cls_result in eval_results])\n mean_ap = []\n for i in range(num_scales):\n if np.any(all_num_gts[:, i] > 0):\n mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())\n else:\n mean_ap.append(0.0)\n else:\n aps = []\n for cls_result in eval_results:\n if cls_result['num_gts'] > 0:\n 
aps.append(cls_result['ap'])\n mean_ap = np.array(aps).mean().item() if aps else 0.0\n\n print_map_summary(\n mean_ap, eval_results, dataset, area_ranges, logger=logger)\n\n return mean_ap, eval_results" }, { "identifier": "obb2poly_np", "path": "PointOBB/mmdet/datasets/utils.py", "snippet": "def obb2poly_np(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to polygons.\n\n Args:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n \"\"\"\n if version == 'oc':\n results = obb2poly_np_oc(rbboxes)\n elif version == 'le135':\n results = obb2poly_np_le135(rbboxes)\n elif version == 'le90':\n results = obb2poly_np_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" }, { "identifier": "poly2obb_np", "path": "PointOBB/mmdet/datasets/utils.py", "snippet": "def poly2obb_np(polys, version='oc'):\n \"\"\"Convert polygons to oriented bounding boxes.\n\n Args:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n version (Str): angle representations.\n\n Returns:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n \"\"\"\n if version == 'oc':\n results = poly2obb_np_oc(polys)\n elif version == 'le135':\n results = poly2obb_np_le135(polys)\n elif version == 'le90':\n results = poly2obb_np_le90(polys)\n else:\n raise NotImplementedError\n return results" } ]
import itertools import logging import os.path as osp import warnings import mmcv import numpy as np import tempfile import os import shutil from collections import OrderedDict from mmcv.utils import print_log from terminaltables import AsciiTable from huicv.evaluation.expand_cocofmt_eval import COCOExpandEval from huicv.evaluation.location_evaluation import LocationEvaluator from functools import partial from mmdet.core import eval_recalls from .api_wrappers import COCO, COCOeval from .builder import DATASETS from .coco import CocoDataset from .utils import eval_rbbox_map, obb2poly_np, poly2obb_np from huicv.corner_dataset.corner_dataset_util import generate_corner_dataset from huicv.coarse_utils.noise_data_utils import get_new_json_file_path, generate_pseudo_bbox_for_point from .noise_data_utils import get_new_json_file_path
11,030
"""Dump the detection results to a COCO style json file. There are 3 types of results: proposals, bbox predictions, mask predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. Args: results (list[list | tuple | ndarray]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.bbox.json", "somepath/xxx.segm.json", "somepath/xxx.proposal.json". Returns: dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ values are corresponding filenames. """ result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = f'{outfile_prefix}.proposal.json' mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files def format_results(self, results, jsonfile_prefix=None, **kwargs): """Format the results to json (standard format for COCO evaluation). Args: results (list[tuple | numpy.ndarray]): Testing results of the dataset. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. Returns: tuple: (result_files, tmp_dir), result_files is a dict containing \ the json filepaths, tmp_dir is the temporal directory created \ for saving json files when jsonfile_prefix is not specified. """ assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), ( 'The length of results is not equal to the dataset len: {} != {}'. format(len(results), len(self))) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) return result_files, tmp_dir def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=None, metric_items=None, cocofmt_kwargs={}, skip_eval=False, use_location_metric=False, location_kwargs={}, use_without_bbox_metric=False, without_bbox_kwargs={}, save_result_file=None, ): # add by hui """Evaluation in COCO protocol. Args: results (list[list | tuple]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to evaluating the AP for each class. 
proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float], optional): IoU threshold used for evaluating recalls/mAPs. If set to a list, the average of all IoUs will also be computed. If not specified, [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. Default: None. metric_items (list[str] | str, optional): Metric items that will be returned. If not specified, ``['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when ``metric=='bbox' or metric=='segm'``. Returns: dict[str, float]: COCO style evaluation metric. """ nproc=4 nproc = min(nproc, os.cpu_count()) iou_thr=0.5 scale_ranges=None annotations = [self.get_ann_info(i) for i in range(len(self))] for ann in annotations: ann['bboxes'] = ann['true_bboxes'] eval_results = {} assert isinstance(iou_thr, float)
# add by hui, if there is not corner dataset, create one def generate_corner_json_file_if_not_exist(ann_file, data_root, corner_kwargs): # generate corner json file name if data_root is not None: if not osp.isabs(ann_file): ann_file = osp.join(data_root, ann_file) origin_ann_file = ann_file max_tile_size, tile_overlap = corner_kwargs['max_tile_size'], corner_kwargs['tile_overlap'] ann_file = "{}_corner_w{}h{}ow{}oh{}.json".format( ann_file[:-5], max_tile_size[0], max_tile_size[1], tile_overlap[0], tile_overlap[1]) ann_dir, ann_file_name = osp.split(ann_file) corner_file_dir = osp.join(ann_dir, 'corner') ann_file = osp.join(corner_file_dir, ann_file_name) # generate corner dataset and save to disk, if it not exists if not osp.exists(ann_file): _ = generate_corner_dataset(origin_ann_file, save_path=ann_file, **corner_kwargs) print("generate corner dataset done, please re-run your code.") exit(0) return ann_file def generate_pesudo_bbox_for_noise_data(ann_file, data_root, noise_kwargs): # ann_file, _ = get_new_json_file_path(ann_file, data_root, 'noise', 'noisept') # assert osp.exists(ann_file), "{} not exist.".format(ann_file) ori_ann_file = ann_file pseudo_wh = noise_kwargs['pseudo_wh'] if isinstance(pseudo_wh, (int, float)): noise_kwargs['pseudo_wh'] = pseudo_wh = (pseudo_wh, pseudo_wh) suffix = 'pseuw{}h{}'.format(*pseudo_wh) ann_file, _ = get_new_json_file_path(ori_ann_file, data_root, None, suffix) if not osp.exists(ann_file): _ = generate_pseudo_bbox_for_point(ori_ann_file, ann_file, **noise_kwargs) print("generate pseudo bbox for dataset done, please re-run your code.") exit(0) return ann_file @DATASETS.register_module() class CocoFmtObbDataset(CocoDataset): CLASSES = None def __init__(self, ann_file, version='le90', data_root=None, corner_kwargs=None, train_ignore_as_bg=True, noise_kwargs=None, merge_after_infer_kwargs=None, min_gt_size=None, **kwargs): # add by hui, if there is not corner dataset, create one if corner_kwargs is not None: assert ann_file[-5:] == '.json', "ann_file must be a json file." ann_file = generate_corner_json_file_if_not_exist(ann_file, data_root, corner_kwargs) print("load corner dataset json file from {}".format(ann_file)) if noise_kwargs is not None: if 'pseudo_wh' in noise_kwargs and noise_kwargs['pseudo_wh'] is not None: ann_file = generate_pesudo_bbox_for_noise_data(ann_file, data_root, noise_kwargs) elif 'wh_suffix' in noise_kwargs: ann_file, _ = get_new_json_file_path(ann_file, data_root, noise_kwargs['sub_dir'], noise_kwargs['wh_suffix']) else: raise ValueError('one of [pseudo_wh, wh_suffix] must be given') print("load noise dataset json file from {}".format(ann_file)) self.train_ignore_as_bg = train_ignore_as_bg self.merge_after_infer_kwargs = merge_after_infer_kwargs self.min_gt_size = min_gt_size self.version = version super(CocoFmtObbDataset, self).__init__( ann_file, data_root=data_root, **kwargs ) def load_annotations(self, ann_file): """Load annotation from COCO style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api. 
""" self.coco = COCO(ann_file) if self.CLASSES is None: self.CLASSES = [cat['name'] for cat in self.coco.dataset['categories']] # add by hui print(f'self classes:{self.CLASSES}') # The order of returned `cat_ids` will not # change with the order of the CLASSES self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] total_ann_ids = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] data_infos.append(info) ann_ids = self.coco.get_ann_ids(img_ids=[i]) total_ann_ids.extend(ann_ids) assert len(set(total_ann_ids)) == len( total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" return data_infos def _filter_imgs(self, min_size=32): valid_inds = super(CocoFmtObbDataset, self)._filter_imgs(min_size) # filter image only contain ignore_bboxes or too small bbox if self.min_gt_size: new_valid_inds, valid_img_ids = [], [] for i, img_id in enumerate(self.img_ids): valid = False for ann in self.coco.imgToAnns[img_id]: if 'ignore' in ann and ann['ignore']: continue if ann['bbox'][-1] > self.min_gt_size and ann['bbox'][-2] > self.min_gt_size: valid = True if valid: new_valid_inds.append(valid_inds[i]) valid_img_ids.append(img_id) self.img_ids = valid_img_ids valid_inds = new_valid_inds print("valid image count: ", len(valid_inds)) # add by hui return valid_inds def get_ann_info(self, idx): """Get COCO annotation by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. """ img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) return self._parse_ann_info(self.data_infos[idx], ann_info) def _parse_ann_info(self, img_info, ann_info): """Parse bbox and mask annotation. Args: ann_info (list[dict]): Annotation info of an image. with_mask (bool): Whether to parse mask annotations. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore,\ labels, masks, seg_map. "masks" are raw annotations and not \ decoded into binary masks. 
""" gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_masks_ann = [] true_bboxes, anns_id, ann_weight = [], [], [] # add by hui,fei for i, ann in enumerate(ann_info): if self.train_ignore_as_bg and ann.get('ignore', False): # change by hui continue x1, y1, w, h = ann['bbox'] inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) if inter_w * inter_h == 0: continue if ann['area'] <= 0 or w < 1 or h < 1: continue if ann['category_id'] not in self.cat_ids: continue bbox = [x1, y1, x1 + w, y1 + h] if ann.get('iscrowd', False): gt_bboxes_ignore.append(bbox) else: if 'true_rbox' in ann: x1,y1,x2,y2,x3,y3,x4,y4 = ann['true_rbox'] poly = np.array((x1,y1,x2,y2,x3,y3,x4,y4), dtype=np.float32) result = poly2obb_np(poly, self.version) if result is not None: x, y, w, h, a = result true_bboxes.append([x, y, w, h, a]) else: print(f'poly is None: {poly}') filename = img_info['file_name'] print(f'image info: {filename}') continue elif 'true_bbox' in ann: x1, y1, w, h = ann['true_bbox'] poly = np.array((x1,y1,x1+w,y1,x1+w,y1+h,x1,y1+h), dtype=np.float32) result = poly2obb_np(poly, 'oc') if result is not None: x, y, w, h, a = result true_bboxes.append([x, y, w, h, a]) else: print(f'poly is None: {poly}') filename = img_info['file_name'] print(f'image info: {filename}') continue gt_bboxes.append(bbox) gt_labels.append(self.cat2label[ann['category_id']]) gt_masks_ann.append(ann.get('segmentation', None)) anns_id.append(ann['id']) if 'ann_weight' in ann: weight = ann['ann_weight'] ann_weight.append(weight) if len(true_bboxes) > 0: # add by hui true_bboxes = np.array(true_bboxes, dtype=np.float32) anns_id = np.array(anns_id, dtype=np.int64) ann_weight = np.array(ann_weight, dtype=np.float32) # add by fei if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) seg_map = img_info['filename'].replace('jpg', 'png') ann = dict( bboxes=gt_bboxes, labels=gt_labels, anns_id=anns_id, # add by hui bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map, ) if len(true_bboxes) > 0: # add by hui ann['true_bboxes'] = true_bboxes if len(ann_weight) > 0: # add by fei ann['ann_weight'] = ann_weight return ann def _proposal2json(self, results): """Convert proposal results to COCO json style.""" json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] bboxes = results[idx] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = 1 json_results.append(data) return json_results def _det2json(self, results): """Convert detection results to COCO json style.""" json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] result = results[idx] for label in range(len(result)): bboxes = result[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = bboxes[i][0:5] data['score'] = float(bboxes[i][5]) data['category_id'] = self.cat_ids[label] if len(bboxes[i]) >= 7: data['ann_id'] = int(bboxes[i][6]) json_results.append(data) return json_results def results2json(self, results, outfile_prefix): """Dump the detection results to a COCO style json file. 
There are 3 types of results: proposals, bbox predictions, mask predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. Args: results (list[list | tuple | ndarray]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.bbox.json", "somepath/xxx.segm.json", "somepath/xxx.proposal.json". Returns: dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ values are corresponding filenames. """ result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = f'{outfile_prefix}.proposal.json' mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files def format_results(self, results, jsonfile_prefix=None, **kwargs): """Format the results to json (standard format for COCO evaluation). Args: results (list[tuple | numpy.ndarray]): Testing results of the dataset. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. Returns: tuple: (result_files, tmp_dir), result_files is a dict containing \ the json filepaths, tmp_dir is the temporal directory created \ for saving json files when jsonfile_prefix is not specified. """ assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), ( 'The length of results is not equal to the dataset len: {} != {}'. format(len(results), len(self))) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) return result_files, tmp_dir def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=None, metric_items=None, cocofmt_kwargs={}, skip_eval=False, use_location_metric=False, location_kwargs={}, use_without_bbox_metric=False, without_bbox_kwargs={}, save_result_file=None, ): # add by hui """Evaluation in COCO protocol. Args: results (list[list | tuple]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). 
iou_thrs (Sequence[float], optional): IoU threshold used for evaluating recalls/mAPs. If set to a list, the average of all IoUs will also be computed. If not specified, [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. Default: None. metric_items (list[str] | str, optional): Metric items that will be returned. If not specified, ``['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when ``metric=='bbox' or metric=='segm'``. Returns: dict[str, float]: COCO style evaluation metric. """ nproc=4 nproc = min(nproc, os.cpu_count()) iou_thr=0.5 scale_ranges=None annotations = [self.get_ann_info(i) for i in range(len(self))] for ann in annotations: ann['bboxes'] = ann['true_bboxes'] eval_results = {} assert isinstance(iou_thr, float)
mean_ap, _ = eval_rbbox_map(
3
2023-11-20 07:50:12+00:00
16k
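
The record above closes with the gold continuation ("mean_ap, _ = eval_rbbox_map("), the index of the context snippet that supplies it (3, i.e. the eval_rbbox_map entry), a timestamp, and a context-length tag. Below is a minimal sketch, for orientation only, of how one such record could be stitched into a completion prompt and scored by exact match against that gold line. The field names used here (context, import_statement, cropped_code, next_line, gold_snippet_index) and the prompt layout are assumptions for illustration, not a harness defined by this dataset.

def build_prompt(record: dict) -> str:
    """Assemble a repo-level completion prompt from one record.

    Assumed shape (illustrative only): 'context' is a list of
    {'identifier', 'path', 'snippet'} dicts, followed by the file's
    import block and the truncated source the model must continue.
    """
    context_block = "\n".join(
        f"# from {c['path']} ({c['identifier']})\n{c['snippet']}"
        for c in record.get("context", [])
    )
    return "\n".join([context_block, record["import_statement"], record["cropped_code"]])


def exact_match(prediction: str, record: dict) -> bool:
    """Score a single predicted line against the gold continuation."""
    return prediction.strip() == record["next_line"].strip()


if __name__ == "__main__":
    # Toy record mirroring the shape of the entries listed in this file;
    # values are shortened stand-ins, not actual dataset content.
    toy = {
        "context": [{"identifier": "eval_rbbox_map",
                     "path": "PointOBB/mmdet/datasets/utils.py",
                     "snippet": "def eval_rbbox_map(...): ..."}],
        "import_statement": "from .utils import eval_rbbox_map",
        "cropped_code": "annotations = [self.get_ann_info(i) for i in range(len(self))]",
        "next_line": "mean_ap, _ = eval_rbbox_map(",
        "gold_snippet_index": 0,
    }
    prompt = build_prompt(toy)
    print(exact_match("mean_ap, _ = eval_rbbox_map(", toy))  # True

Exact string match on the first generated line is only one possible metric; the sketch keeps scoring deliberately simple and leaves model invocation out entirely.
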
wangermeng2021/llm-webui
main.py
[ { "identifier": "login_huggingface", "path": "src/utils/common.py", "snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_TOKEN)\n else:\n env_file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\"token.env\")\n load_dotenv(env_file_path)\n HUGGINGFACE_HUB_TOKEN = os.getenv('HUGGINGFACE_HUB_TOKEN')\n print(\"d2:\", HUGGINGFACE_HUB_TOKEN)\n login(token=HUGGINGFACE_HUB_TOKEN)\n os.environ[\"HUGGING_FACE_HUB_TOKEN\"] = HUGGINGFACE_HUB_TOKEN" }, { "identifier": "HuggingfaceInference", "path": "src/finetune/huggingface_inference.py", "snippet": "class HuggingfaceInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,using_4bit_quantization=True,low_cpu_mem_usage=False):\n self.model = None\n self.tokenizer = None\n self.hg_model = None\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n self.bnb_config = None\n if using_4bit_quantization:\n self.bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n self.low_cpu_mem_usage = low_cpu_mem_usage\n def load_model(self):\n try:\n \n if self.model_path.split(os.sep)[-1].rfind(\"llama\") >=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n if not self.tokenizer.pad_token:\n if self.model_path.split(os.sep)[-1].lower().rfind(\"gpt2\")>=0:\n self.tokenizer.pad_token = self.tokenizer.eos_token\n else:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.hg_model.resize_token_embeddings(len(self.tokenizer))\n\n except Exception as e:\n return -1, e\n self.model = pipeline(\n \"text-generation\",\n model=self.hg_model,\n tokenizer=self.tokenizer,\n max_new_tokens = self.max_new_tokens,\n temperature=self.temperature,\n top_p=self.top_p,top_k=self.top_k,do_sample=True,\n return_full_text=False,\n repetition_penalty=self.repetition_penalty,\n # return_dict_in_generate = True\n )\n return 0, \"\"\n def infer(self ,input):\n output = self.model(input)\n return output[0]['generated_text'] if output else None\n def free_memory(self):\n if self.hg_model:\n del self.hg_model\n self.hg_model = None\n if self.tokenizer:\n del self.tokenizer\n self.tokenizer = None\n if self.model:\n del 
self.model\n self.model = None" }, { "identifier": "LlamaCppInference", "path": "src/finetune/llama_cpp_inference.py", "snippet": "class LlamaCppInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,n_gpu_layers=35, n_ctx=4048,verbose=False):\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prefix1 = \"\"\n self.prefix2 = \"\"\n self.model = None\n\n def load_model(self):\n load_model_status = 0\n msg = None\n try:\n self.model = LlamaCpp(model_path=self.model_path, n_gpu_layers=35, n_ctx=4096,max_tokens=self.max_new_tokens, temperature=self.temperature,\n verbose=False, top_k=self.top_k, top_p=self.top_p,repeat_penalty=self.repetition_penalty)\n except Exception as e:\n load_model_status = -1\n msg = e\n return load_model_status, msg\n def infer(self ,input):\n return self.model(input)\n\n\n def free_memory(self):\n if self.model:\n del self.model\n self.model = None" }, { "identifier": "QAWithRAG", "path": "src/rag/qa_with_rag.py", "snippet": "class QAWithRAG():\n def __init__(self ,config: dict ={}):\n self.text_splitter = None\n self.embedding_function = None\n self.vectorstore = None\n self.retriever = None\n self.chat_llm = None\n\n self.chat_history =[]\n # self.persist_directory = \"./chroma_db\"\n self.persist_directory = None\n self.qa = None\n self.langchain_llm = None\n def free_memory(self):\n if self.chat_llm:\n self.chat_llm.free_memory()\n del self.chat_llm\n self.chat_llm = None\n if self.langchain_llm:\n del self.langchain_llm\n self.langchain_llm = None\n if self.qa:\n del self.qa\n self.qa = None\n\n\n def get_text_splitter(self ,chunk_size ,chunk_overlap ,separators):\n self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len,\n separators=separators)\n def load_embedding_model(self ,model_path=\"\"):\n self.embedding_function = HuggingFaceEmbeddings(model_name=model_path ,model_kwargs = {'device': 'cpu'})\n def load_chat_model(self ,model_path,using_4bit_quantization,low_cpu_mem_usage,\n max_new_tokens, temperature, top_k, top_p, repeat_penalty\n ):\n self.set_prompt_template(model_path)\n load_model_status = 0\n if model_path.split('.')[-1] == \"gguf\":\n self.chat_llm = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens, temperature=temperature,\n top_k=top_k, top_p=top_p, repetition_penalty=repeat_penalty)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = self.chat_llm.model\n else:\n self.chat_llm = HuggingfaceInference(model_path, max_new_tokens, temperature, top_p, top_k, repeat_penalty, using_4bit_quantization,low_cpu_mem_usage)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = HuggingFacePipeline(pipeline=self.chat_llm.model)\n\n return load_model_status, msg\n\n #\n def get_document_data(self ,doc_path):\n self.chat_history = []\n self.chat_history.clear()\n self.doc_ext = doc_path.split('.')[-1]\n if self.doc_ext == \"txt\":\n loader = TextLoader(doc_path, encoding='utf8')\n elif self.doc_ext == \"pdf\":\n loader = PyPDFLoader(doc_path)\n elif self.doc_ext == \"docx\":\n loader = Docx2txtLoader(doc_path)\n else:\n raise ValueError(f\"Unsupported format: {self.doc_ext}\")\n data = loader.load()\n return data\n def add_document_to_vector_store(self, doc_path ,search_top_k 
,search_score_threshold):\n data = self.get_document_data(doc_path)\n data = self.text_splitter.split_documents(data)\n try:\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n except InvalidDimensionException:\n Chroma().delete_collection()\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n self.set_retriever(search_top_k ,search_score_threshold)\n\n def set_retriever(self ,search_top_k ,score_threshold):\n self.retriever = self.vectorstore.as_retriever(search_type='similarity_score_threshold',\n search_kwargs={'k': search_top_k, \"score_threshold\": score_threshold})\n def set_prompt_template(self ,chat_model_path):\n\n if chat_model_path.lower().find(\"mistral\") >= 0 and chat_model_path.lower().find(\"instruct\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"llama\") >= 0 and chat_model_path.lower().find(\"chat\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"zephyr\") >= 0:\n prompt_template = \"\"\"<|user|>\\n Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: </s><|assistant|>\\n\"\"\"\n else:\n prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer:\"\"\"\n\n self.prompt_template = PromptTemplate(\n template=prompt_template, input_variables=[\"context\", \"question\"]\n )\n def generate(self, question):\n self.chat_history = []\n if self.retriever:\n\n chain_type_kwargs = {\"prompt\": self.prompt_template ,\"verbose\": False}\n self.qa = RetrievalQA.from_chain_type(llm=self.langchain_llm, chain_type=\"stuff\", retriever=self.retriever,\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs)\n result = self.qa({\"query\": question}, return_only_outputs=True)\n retrieved_txt_list = []\n if len(result['source_documents'] ) >0:\n if self.doc_ext == \"txt\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"pdf\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"docx\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n answer = result['result']\n else:\n answer = \"Sorry, I can't find any relevant information in document. 
\" + result['result']\n return answer, retrieved_txt_list\n else:\n return \"\", retrieved_txt_list" }, { "identifier": "read_yaml", "path": "src/utils/common.py", "snippet": "def read_yaml(yaml_path):\n with open(yaml_path) as f1:\n try:\n data = yaml.safe_load(f1)\n return data\n except yaml.YAMLError as e:\n raise ValueError(f'Error loading yaml file: {e}')" }, { "identifier": "get_first_row_from_dataset", "path": "src/utils/common.py", "snippet": "def get_first_row_from_dataset(dataset_path):\n if os.path.exists(os.path.join(dataset_path, \"dataset_dict.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_infos.json\")):\n dataset = datasets.load_dataset(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_info.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n else:\n raise ValueError(\n f'Invalid Dataset format {dataset_path}.')\n try:\n split_list = list(dataset.keys())\n except:\n split_list = [\"train\"]\n new_split_list= [\"\",\"\",\"\"]\n for split in split_list:\n if split.find(\"train\") >= 0:\n new_split_list[0] = split\n elif split.find(\"val\") >= 0:\n new_split_list[1] = split\n elif split.find(\"test\") >= 0:\n new_split_list[2] = split\n\n return dataset[new_split_list[0]][0],new_split_list" }, { "identifier": "get_runs_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_runs_model_names_from_dir(root_dir):\n\n run_names = os.listdir(root_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(root_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n model_bin_path = os.path.exists(\n os.path.join(root_dir,\n run_name, \"output_model\", run_output_model_name, \"ori\",\n \"pytorch_model.bin\"))\n if run_output_model_name.find(\"merged_\") >= 0 and model_bin_path:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n return runs_output_model" }, { "identifier": "get_hg_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_from_dir(root_dir):\n model_names = os.listdir(root_dir)\n model_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n return model_names" }, { "identifier": "get_hg_model_names_and_gguf_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_and_gguf_from_dir(hg_model_root_dir,runs_model_root_dir):\n output = []\n runs_gguf_files = glob.glob(os.path.join(runs_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"*.gguf\"), recursive=False)\n root_model_hg_dir0 = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"config.json\"),recursive=False)\n root_model_hg_dir1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"config.json\"), recursive=False)\n runs_hg_dir = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"config.json\"),recursive=False)\n runs_gguf_files.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_gguf_files.sort(key=lambda file: 
os.path.getmtime(file), reverse=True)\n root_model_gguf_files1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir0.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n runs_hg_dir.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n\n for file in runs_gguf_files:\n file_pos = file.find(\"runs\")\n output.append(file[file_pos:])\n for file in root_model_gguf_files:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_gguf_files1:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_hg_dir0:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in root_model_hg_dir1:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in runs_hg_dir:\n file_pos = file.find(\"runs\")+len(\"runs\")+1\n output.append(file[file_pos:])\n return output" }, { "identifier": "validate_model_path", "path": "src/utils/common.py", "snippet": "def validate_model_path(model_name):\n if not model_name:\n return False,\"\"\n home_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n base_model_config_path1 = os.path.join(home_dir, \"models\", model_name)\n base_model_config_path2 = os.path.join(base_model_config_path1, \"config.json\")\n run_model_config_path1 = os.path.join(home_dir, \"runs\", model_name)\n run_model_config_path2 = os.path.join(run_model_config_path1, \"config.json\")\n if os.path.exists(base_model_config_path1) and base_model_config_path1.endswith(\".gguf\"):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path1) and run_model_config_path1.endswith(\".gguf\") :\n return True,run_model_config_path1\n if os.path.exists(base_model_config_path2):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path2):\n return True,run_model_config_path1\n return False,\"\"" }, { "identifier": "get_runs_models", "path": "src/utils/common.py", "snippet": "def get_runs_models():\n training_runs_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'runs')\n run_names = os.listdir(training_runs_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file)))\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(training_runs_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n if run_output_model_name.find(\"merged_\") >= 0:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n runs_output_model = runs_output_model[::-1]\n return runs_output_model" }, { "identifier": "get_model_type", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_type(model_path):\n if model_path:\n if model_path.lower().find(\"mistral\") >= 0 and model_path.lower().find(\"instruct\") >= 0:\n model_type = \"mistral\"\n elif model_path.lower().find(\"llama\") >= 0 and model_path.lower().find(\"chat\") >= 0:\n model_type = \"llama2\"\n elif model_path.lower().find(\"zephyr\") >= 0:\n model_type = \"zephyr\"\n else:\n model_type = \"other model\"\n 
else:\n model_type = \"other model\"\n return model_type" }, { "identifier": "get_chat_history_prompt", "path": "src/utils/chat_prompts.py", "snippet": "def get_chat_history_prompt(chat_history,model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt = ','.join(chat_history[:-2])\n prompt = prompt + chat_history[-2]\n elif model_type == \"llama2\":\n prompt = format_chat_history_prompt_for_llama2_7b_chat(chat_history)\n elif model_type == \"zephyr\":\n prompt = format_chat_history_prompt_for_zephyr_7b_instruct(chat_history)\n elif model_type == \"mistral\":\n prompt = format_chat_history_prompt_for_mistral_7b_instruct(chat_history)\n return prompt" }, { "identifier": "get_model_prompt_template", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_prompt_template(model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n elif model_type == \"llama2\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n elif model_type == \"zephyr\":\n prompt_template = PromptTemplate.from_template(\n \"<|user|>\\n{question}</s><|assistant|>\\n\"\n )\n elif model_type == \"mistral\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n return prompt_template" }, { "identifier": "download_model", "path": "src/utils/download_model.py", "snippet": "class ModelDownloader:\n def __init__(self, max_retries=5):\n def sanitize_model_and_branch_names(self, model, branch):\n def get_download_links_from_huggingface(self, model, branch, text_only=False, specific_file=None):\n def get_output_folder(self, model, branch, is_lora, is_llamacpp=False, base_folder=None):\n def get_single_file(self, url, output_folder, start_from_scratch=False):\n def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=4):\n def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None, is_llamacpp=False):\n def check_model_files(self, model, branch, links, sha256, output_folder):" }, { "identifier": "QloraTrainer", "path": "src/finetune/qlora_trainer.py", "snippet": "class QloraTrainer(PeftTrainer):\n\n def __init__(self, config: dict):\n self.config = config\n self.tokenizer = None\n self.base_model = None\n self.merged_model = None\n self.dataset = None\n self.fused_model = None\n self.train_dataset = None\n self.val_dataset = None\n self.logging_callback = self.LoggingCallbacks()\n print(\"config:\",config)\n def load_dataset(self):\n if self.config[\"dataset\"][\"hg_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_infos.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset= datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_val_dataset\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_dict.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = 
datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_val_dataset\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"hg_dataset_dir\"]}.')\n else:\n\n if self.config[\"dataset\"][\"local_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_infos.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_val_set\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_dict.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_val_set\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"local_dataset_dir\"]}.')\n\n\n if self.config[\"dataset\"][\"max_length\"] == \"Model Max Length\":\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"mistral\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"zephyr\") >= 0:\n context_window = 1024*4\n else:\n context_window = self.tokenizer.model_max_length\n if self.tokenizer.model_max_length == int(1e30):\n context_window = 1024\n else:\n context_window = self.config[\"dataset\"][\"max_length\"]\n print(\"context_window:\",context_window)\n self.train_dataset = self.train_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n # padding=True\n ))\n if self.val_dataset:\n self.val_dataset = self.val_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n padding=True\n ))\n def generate_prompt(self,sample,eos_token):\n\n prompt = self.config[\"dataset\"][\"prefix1\"]+sample[self.config[\"dataset\"][\"datatset_col1\"]]+\\\n self.config[\"dataset\"][\"prefix2\"] + sample[self.config[\"dataset\"][\"datatset_col2\"]]+eos_token\n # print(\"prompt:\",prompt)\n return prompt\n\n def load_model(self):\n\n if self.config[\"model\"][\"fine_tuning_type\"] == \"QLoRA\":\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n elif self.config[\"model\"][\"fine_tuning_type\"] == \"LoRA\":\n bnb_config = None\n try:\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n else:\n self.tokenizer = 
AutoTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n except Exception as e:\n return -1,e\n if not self.tokenizer.pad_token:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n if self.config[\"training\"][\"gradient_checkpointing\"] and not self.config[\"model\"][\"base_model_name\"].rfind(\"phi\")>=0:\n # self.base_model.gradient_checkpointing_enable()\n self.base_model = prepare_model_for_kbit_training(self.base_model,use_gradient_checkpointing=True,gradient_checkpointing_kwargs={'use_reentrant':False})\n else:\n self.base_model = prepare_model_for_kbit_training(self.base_model, use_gradient_checkpointing=False,gradient_checkpointing_kwargs={'use_reentrant':False})\n if self.config[\"model\"][\"base_model_name\"].lower().rfind(\"llama\")>=0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"mistral\") >= 0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"zephyr\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"llama\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"falcon\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"falcon\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"gpt2\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"gpt2\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"phi\") >= 0:\n target_modules = [\"Wqkv\", \"out_proj\"]\n task_type = \"CAUSAL_LM\"\n else:\n raise ValueError(f'{self.config[\"model\"][\"base_model_name\"]} is not yet supported.')\n #T5,bart, task_type = \"SEQ_2_SEQ_LM\" ,AutoModelForSeq2SeqLM\n \n lora_config = LoraConfig(\n r=self.config[\"model\"][\"lora_r\"],\n lora_alpha=self.config[\"model\"][\"lora_alpha\"],\n target_modules=target_modules,\n lora_dropout=self.config[\"model\"][\"lora_dropout\"],\n bias=self.config[\"model\"][\"lora_bias\"],\n task_type=task_type,\n )\n self.fused_model = get_peft_model(self.base_model, lora_config)\n # self.fused_model.gradient_checkpointing = True\n return 0,\"\"\n def train(self):\n self.run_name = datetime.now().strftime(\"run_%Y-%m-%d_%H-%M-%S\")\n logging_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"tensorboard\")\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"output_model\", run_output_model_name + \"_adapter\")\n checkpoint_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name)\n self.trainer = transformers.Trainer(\n model=self.fused_model,\n train_dataset=self.train_dataset,\n eval_dataset= self.val_dataset if self.val_dataset else None,\n args=transformers.TrainingArguments(\n per_device_train_batch_size=self.config[\"training\"][\"batch_size\"],\n gradient_accumulation_steps=self.config[\"training\"][\"gradient_accumulation_steps\"],\n warmup_steps=self.config[\"training\"][\"warmup_steps\"],\n num_train_epochs=self.config[\"training\"][\"epochs\"],\n learning_rate=self.config[\"training\"][\"learning_rate\"],\n fp16=True,\n output_dir=checkpoint_dir,\n report_to=\"tensorboard\",\n optim=self.config[\"training\"][\"optimizer\"],\n 
lr_scheduler_type=self.config[\"training\"][\"lr_scheduler_type\"],\n load_best_model_at_end=True if self.val_dataset else False,\n save_strategy=\"steps\",\n save_steps = self.config[\"training\"][\"eval_steps\"],\n save_total_limit=1,\n evaluation_strategy=\"steps\" if self.val_dataset else \"no\",\n eval_steps=self.config[\"training\"][\"eval_steps\"], # eval interval\n per_device_eval_batch_size=1,\n # eval_steps=10, # eval interval\n logging_steps=100,#self.config[\"training\"][\"eval_steps\"]\n # run_name=self.run_name,\n logging_dir=logging_dir,\n ),\n\n callbacks=[self.logging_callback,transformers.EarlyStoppingCallback(early_stopping_patience=self.config[\"training\"][\"early_stopping_patience\"]) ] if self.config[\"training\"][\"early_stopping_patience\"]>0 else [self.logging_callback],\n data_collator=transformers.DataCollatorForLanguageModeling(self.tokenizer, mlm=False),\n\n )\n\n self.fused_model.config.use_cache = False # silence the warnings. Please re-enable for inference!\n try:\n self.trainer.train()\n except Exception as e:\n return -1,e\n # model_save_path = f\"{self.config['training']['output_dir']}/{self.config['model']['base_model_name']}_adapter\"\n self.trainer.save_model(output_model_dir)\n return 0,\"\"\n def merge_and_save(self):\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n else:\n base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_adapter_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\n run_output_model_name + \"_adapter\")\n\n model = PeftModel.from_pretrained(base_model, output_adapter_model_dir)\n\n merged_model = model.merge_and_unload()\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_merged_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\"merged_\"+run_output_model_name,\"ori\")\n merged_model.save_pretrained(output_merged_model_dir)\n self.tokenizer.save_pretrained(output_merged_model_dir)\n\n def _print_trainable_parameters(self, model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\n class LoggingCallbacks(transformers.TrainerCallback):\n # current_step = 0\n # max_steps = 0\n\n def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n pass\n\n def on_step_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n global TRAINING_STATUS\n if TRAINING_STATUS.status == 1:\n control.should_epoch_stop = True\n control.should_training_stop = True\n else:\n self.max_steps = state.max_steps\n self.current_step = state.global_step\n\n def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: 
transformers.TrainerControl, logs, **kwargs):\n pass\n\n def free_memroy(self):\n try:\n del self.fused_model\n del self.tokenizer\n del self.base_model\n del self.trainer\n torch.cuda.empty_cache()\n except Exception as e:\n print(\"Free memory error:\",e)" }, { "identifier": "TRAINING_STATUS", "path": "src/finetune/qlora_trainer.py", "snippet": "TRAINING_STATUS = TrainingStatus()" }, { "identifier": "download_model_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_model_wrapper(repo_id,local_model_root_dir, specific_file=None, return_links=False, check=False,progress = gr.Progress()):\n if repo_id.endswith(\".gguf\"):\n try:\n model_dir = os.path.join(local_model_root_dir, '/'.join(repo_id.split('/')[0:-1]))\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {repo_id.split('/')[-1]} to `{model_dir}/...`</span>\"\n hf_hub_download(repo_id='/'.join(repo_id.split('/')[0:-1]), filename=repo_id.split('/')[-1], local_dir=model_dir, resume_download=True,\n force_download=False)\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n else:\n if repo_id == \"\" or repo_id == \"None\":\n # return gr.update(value=\"Model's name is empty!\",visible=True)\n yield f\"Model's name is empty!\"\n else:\n model_dir = os.path.join(local_model_root_dir, repo_id)\n\n model_config_path = os.path.join(model_dir, \"config.json\")\n model_config_path1 = os.path.join(model_dir, \"pytorch_model.bin\")\n model_config_path2 = os.path.join(model_dir, \"model.safetensors\")\n if os.path.exists(model_config_path1) or os.path.exists(model_config_path2):\n yield '<span style=\"color:green\">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded.</span>'\n else:\n\n try:\n progress(0.0)\n # download_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"download-model.py\")\n # downloader = importlib.import_module(download_model_path).ModelDownloader()\n downloader = download_model.ModelDownloader()\n model, branch = downloader.sanitize_model_and_branch_names(repo_id, None)\n yield (\"Getting the download links from Hugging Face\")\n links, sha256, is_lora, is_llamacpp, link_file_size_list = downloader.get_download_links_from_huggingface(model,\n branch,\n text_only=False,\n specific_file=specific_file\n )\n if return_links:\n yield '\\n\\n'.join([f\"`{Path(link).name}`\" for link in links])\n yield (\"Getting the output folder\")\n # base_folder = shared.args.lora_dir if is_lora else shared.args.model_dir\n base_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\n output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp,\n base_folder=base_folder)\n link_file_size_list = np.array(link_file_size_list)\n links = np.array(links)\n sorted_index = np.argsort(link_file_size_list)\n link_file_size_list = link_file_size_list[sorted_index]\n links = links[sorted_index]\n total_file_size = sum(link_file_size_list)\n copyed_file_size = 0\n for link, link_file_size in zip(links, link_file_size_list):\n model_file_name = link.split('/')[-1]\n if model_file_name.find(\"Pooling\")>=0:\n model_file_name = model_file_name+\"/config.json\"\n # yield (f\"Downloading file {model_file_name} to `{output_folder}/...`\")\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {model_file_name} to `{output_folder}/...`</span>\"\n hf_hub_download(repo_id=repo_id, 
filename=model_file_name, local_dir=model_dir, resume_download=True,\n force_download=False)\n copyed_file_size += link_file_size\n progress(copyed_file_size / total_file_size)\n # yield (\"Download successful!\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" }, { "identifier": "download_dataset_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_dataset_wrapper(repo_id,local_dataset_root_dir,progress = gr.Progress()):\n repo_id = repo_id.strip()\n if repo_id == \"\":\n yield \"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset's name is empty!</span>\"\n else:\n dataset_dir = os.path.join(local_dataset_root_dir, repo_id)\n # dataset_config_path1 = os.path.join(dataset_dir, \"config.json\")\n dataset_config_path1 = os.path.join(dataset_dir, \"dataset_infos.json\")\n dataset_config_path2 = os.path.join(dataset_dir, \"dataset_dict.json\")\n\n if os.path.exists(dataset_config_path1) or os.path.exists(dataset_config_path2):\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset has already been downloaded.</span>\"\n else:\n try:\n\n progress(0.3)\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading dataset to `{dataset_dir}/...`</span>\"\n datasets = load_dataset(repo_id)\n progress(0.8)\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n datasets.save_to_disk(dataset_dir)\n # datasets = load_from_disk(\"dddd\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" } ]
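# ---------------------------------------------------------------------------
# Minimal driver sketch (not part of the repository): it only illustrates the
# call order the QloraTrainer shown above expects -- a nested config dict with
# "dataset", "model" and "training" sections, then load_model -> load_dataset
# -> train -> merge_and_save. The `config` argument is a hypothetical
# placeholder supplied by the caller.
# ---------------------------------------------------------------------------
from src.finetune.qlora_trainer import QloraTrainer

def _demo_run_qlora(config: dict) -> None:
    trainer = QloraTrainer(config)
    status, err = trainer.load_model()   # builds tokenizer + (Q)LoRA-wrapped base model
    if status != 0:
        raise RuntimeError(err)
    trainer.load_dataset()               # tokenizes the train/val sets with the prompt template
    status, err = trainer.train()        # writes the adapter under runs/<run_name>/output_model
    if status != 0:
        raise RuntimeError(err)
    trainer.merge_and_save()             # merges the adapter into "merged_<model>/ori"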
import pandas as pd
import math
import numpy as np
import gc
import os
import requests
import subprocess
import threading
import time
import gradio as gr
import traceback
import glob
import shutil
import torch
import socket
from src.utils.common import login_huggingface
from src.finetune.huggingface_inference import HuggingfaceInference
from src.finetune.llama_cpp_inference import LlamaCppInference
from src.rag.qa_with_rag import QAWithRAG
from src.utils.common import read_yaml, get_first_row_from_dataset, \
    get_runs_model_names_from_dir, get_hg_model_names_from_dir, get_hg_model_names_and_gguf_from_dir, \
    validate_model_path, get_runs_models
from src.utils.chat_prompts import get_model_type, get_chat_history_prompt, get_model_prompt_template
from transformers.training_args import OptimizerNames
from huggingface_hub import hf_hub_download
from src.utils import download_model
from pathlib import Path
from src.finetune.qlora_trainer import QloraTrainer
from src.finetune.qlora_trainer import TRAINING_STATUS
from src.utils.download_huggingface_repo import download_model_wrapper, download_dataset_wrapper
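# Usage sketch (assumption, not repository code): get_model_type() keys off the
# model path, get_model_prompt_template() returns a langchain-style PromptTemplate,
# and the question is then formatted into the model-specific chat template. The
# model path and question below are hypothetical placeholders.
def _demo_build_prompt(model_path: str, question: str) -> str:
    model_type = get_model_type(model_path)             # "mistral" / "llama2" / "zephyr" / "other model"
    prompt_template = get_model_prompt_template(model_type)
    return prompt_template.format(question=question)    # e.g. "<s>[INST] {question} [/INST]" for mistral

# _demo_build_prompt("models/Mistral-7B-Instruct-v0.1", "What is QLoRA?")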
13,257
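# Sketch (assumption, not repository code): validate_model_path() returns a
# (found, resolved_path) pair, looking under both models/<name> and runs/<name>
# for either a bare .gguf file or a directory containing config.json. The model
# name argument is a hypothetical placeholder.
from src.utils.common import validate_model_path

def _demo_resolve_model(model_name: str) -> str:
    found, model_path = validate_model_path(model_name)
    if not found:
        raise FileNotFoundError(f"{model_name} has not been downloaded to models/ or runs/")
    return model_path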
global chatbot_history, stop_generation_status stop_generation_status = True chatbot_history = [] return gr.update(value=None) def show_chatbot_question1(text): global chatbot_history if not text: raise gr.Error('Enter text') chatbot_history = chatbot_history + [[text, '']] chatbot_history = chatbot_history[-5:] return chatbot_history def show_chatbot_question2(text): global chatbot_history if not text: raise gr.Error('Enter text') chatbot_history = chatbot_history + [[text, '']] chatbot_history = chatbot_history[-5:] return chatbot_history # input_txtbox.submit(add_text) def generate_btn_click1(input_txtbox): global chatbot_history, infer_model, stop_generation_status chatbot_history_np = np.asarray(chatbot_history) chatbot_history_np = chatbot_history_np.flatten() chatbot_history_list = chatbot_history_np.tolist() stop_generation_status = False model_type = "other model" if infer_model: model_type = get_model_type(infer_model.model_path) prompt = get_chat_history_prompt(chatbot_history_list, model_type) print(f"{model_type} input prompt:", prompt) answer = infer_model(prompt) else: raise gr.Error("Model is not loaded!") return chatbot_history,gr.update(value="") print(f"{model_type} output:", answer) for char in answer: if stop_generation_status: break try: chatbot_history[-1][-1] += char except: break time.sleep(0.05) # print("d2:",chatbot_history) yield chatbot_history,gr.update(value="") yield chatbot_history,gr.update(value="") def generate_btn_click2(input_txtbox): global chatbot_history, infer_model, stop_generation_status chatbot_history_np = np.asarray(chatbot_history) chatbot_history_np = chatbot_history_np.flatten() chatbot_history_list = chatbot_history_np.tolist() stop_generation_status = False running_model_name = "other model" if infer_model: if infer_model.model_path.lower().find("mistral") >= 0 and infer_model.model_path.lower().find( "instruct") >= 0: running_model_name = "mistral" prompt = get_chat_history_prompt(chatbot_history_list, running_model_name) elif infer_model.model_path.lower().find("llama") >= 0 and infer_model.model_path.lower().find("chat") >= 0: running_model_name = "llama2" prompt = get_chat_history_prompt(chatbot_history_list, running_model_name) elif infer_model.model_path.lower().find("zephyr") >= 0: running_model_name = "zephyr" prompt = get_chat_history_prompt(chatbot_history_list, running_model_name) else: prompt = ','.join(chatbot_history_list[:-2]) prompt = prompt + chatbot_history_list[-2] print(f"{running_model_name} input prompt:", prompt) answer = infer_model(prompt) else: raise gr.Error("Model is not loaded!") return chatbot_history,gr.update(value="") print(f"{running_model_name} output:", answer) for char in answer: if stop_generation_status: break try: chatbot_history[-1][-1] += char except: break time.sleep(0.05) # print("d2:",chatbot_history) yield chatbot_history,gr.update(value="") yield chatbot_history,gr.update(value="") def generate_btn_click_clear_text1(): return gr.update(value="") def generate_btn_click_clear_text2(): return gr.update(value="") input_txtbox.submit(show_chatbot_question1, inputs=[input_txtbox], outputs=[chatbot], queue=False). \ success(generate_btn_click1, inputs=[input_txtbox], outputs=[chatbot,input_txtbox]) generate_btn.click(show_chatbot_question2, inputs=[input_txtbox], outputs=[chatbot], queue=False). 
\ success(generate_btn_click2, inputs=[input_txtbox], outputs=[chatbot,input_txtbox]) # clear_btn.click(clear_chat_history, [], chatbot) stop_btn.click(click_stop_btn) ########################## ###################### def click_delete_text_btn(training_runs_dropdown): delete_run_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', training_runs_dropdown) if os.path.exists(delete_run_dir): shutil.rmtree(delete_run_dir) training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names = [run_name for run_name in run_names if os.path.isdir(os.path.join(training_runs_dir, run_name))] run_names.sort(key=lambda f: os.path.getmtime(os.path.join(training_runs_dir, f))) sss = np.random.randint(0, 100) + 1000 iframe = f'<iframe src={TENSORBOARD_URL} style="border:none;height:{sss}px;width:100%">' return gr.update(choices=run_names, value=run_names[0] if run_names else ""), gr.update(value=iframe) def click_stop_training_btn(): global stop_training
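# Sketch (assumption, not repository code): the click handlers above flatten the
# [[user, assistant], ...] chatbot history into a flat list before calling
# get_chat_history_prompt(); np.asarray(history).flatten().tolist() is equivalent to:
def _demo_flatten_history(chatbot_history: list) -> list:
    flat = []
    for user_msg, bot_msg in chatbot_history:
        flat.extend([user_msg, bot_msg])
    return flat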
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889' # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889' LOCAL_HOST_IP = "0.0.0.0" TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/" INIT_DATASET_NAME = "test_python_code_instructions_5000_rows" RAG_DATA_LIST_DROPDOWN = "" TEXT_SPLITTER_DROPDOWN = "" CHUNK_SIZE_SLIDER = 0 CHUNK_OVERLAP_SLIDER = -1 SEPARATORS_TEXTBOX = "" EMBEDDING_MODEL_SOURCE_RADIO = "" HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = "" LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = "" CHAT_MODEL_SOURCE_RADIO = "" HUB_CHAT_MODEL_NAMES_DROPDOWN = "" LOCAL_CHAT_MODEL_NAMES_DROPDOWN = "" SEARCH_TOP_K_SLIDER = "" SEARCH_SCORE_THRESHOLD_SLIDER = "" training_ret_val = -1 error_msg = "" current_running_model_name = "" infer_model = None stop_generation_status = False chatbot_history=[] chatbot_height = 500 rag_chatbot_history=[] rag_stop_generation_status = False qa_with_rag = QAWithRAG() train_param_config = {} train_param_config["dataset"]={} train_param_config["model"]={} train_param_config["training"]={} model_zoo_config = {} transformer_optimizer_list = [] model_context_window = 0 init_train_file_path = None init_val_file_path = None INIT_PREFIX1 = "" INIT_PREFIX2 = "" INIT_PREFIX3 = "" INIT_PREFIX4 = "" INIT_COL1_TEXT = "" INIT_COL2_TEXT = "" INIT_COL3_TEXT = "" INIT_COL4_TEXT = "" col_names = [] DATASET_FIRST_ROW = None local_model_list = "" local_model_root_dir = "" base_model_names = [] training_base_model_names = [] embedding_model_names = [] base_model_context_window = [] local_dataset_list = [] local_dataset_root_dir = "" def get_local_embedding_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} 
--reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From 
Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>') else: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') # home_chat_model_running_status_markdown = gr.Markdown( # '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=3 ) generate_btn = gr.Button("Generate", scale=1) stop_btn = gr.Button("Stop", scale=1) # clear_btn = gr.Button("Clear",scale=1) with gr.Tab("Fine-Tuning"): with gr.Tabs() as tensorboard_tab: with gr.TabItem("Training", id=0): with gr.Row(): with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;1.Training", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;1).Model", elem_classes="white_background") with gr.Group(): # gr.Markdown("<br> &nbsp;&nbsp;&nbsp; Base Model") base_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_model_root_dir})"] base_model_source_radio = gr.Radio(base_model_source_radio_choices, label="Base Model", value=base_model_source_radio_choices[0], interactive=True) with gr.Row(elem_classes="white_background"): base_model_name_dropdown = gr.Dropdown(training_base_model_names, label="Model Name", value=training_base_model_names[0] if training_base_model_names else None, interactive=True, visible=True, scale=5, allow_custom_value=True) download_local_model_btn = gr.Button("Download", scale=1, visible=True) stop_download_local_model_btn = gr.Button("Stop", scale=1, visible=False) # model_download_status = gr.Markdown("<div id='vertical_center_align_markdown'><p style='text-align: center;'>Not downloaded</p></div>", 
elem_classes="white_background",scale=1,full_width=True,visible=False) if validate_model_path(training_base_model_names[0])[0]: download_model_status_markdown = gr.Markdown('<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_model_status_markdown = gr.Markdown('<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): # local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") # runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_model_list = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) local_model_list = get_hg_model_names_from_dir(os.path.dirname(os.path.abspath(__file__)), "models") local_model_dropdown = gr.Dropdown(local_model_list, label="Local Model", info="", value=local_model_list[0] if len(local_model_list) > 0 else None, interactive=True, elem_classes="white_background", scale=5, visible=False) refresh_local_model_list_btn = gr.Button("Refresh", scale=1, visible=False) fine_tuning_type_dropdown = gr.Dropdown(["QLoRA", "LoRA"], label="Fine-Tuning Type", info="", value="QLoRA", interactive=True) with gr.Group(): with gr.Row(elem_classes="white_background"): # gr.Markdown("### &nbsp;&nbsp;&nbsp; LoRA Config", elem_classes="white_background") lora_r_list = [str(ri) for ri in range(8, 65, 8)] lora_r_slider = gr.Slider(8, 64, value=8, step=8, label="lora_r", interactive=True) # lora_r_dropdown = gr.Dropdown(lora_r_list,label="lora_r", value=lora_r_list[0],interactive=True,allow_custom_value=True) lora_alpha_slider = gr.Slider(8, 96, value=32, step=8, label="lora_alpha", interactive=True) # lora_alpha_list = [str(ri) for ri in range(8, 97, 8)] # lora_alpha_dropdown = gr.Dropdown(lora_alpha_list,label="lora_alpha", value=lora_alpha_list[3],interactive=True,allow_custom_value=True) with gr.Row(elem_classes="white_background"): lora_dropout_slider = gr.Slider(0, 1, value=0.05, step=0.01, label="lora_dropout", interactive=True) lora_bias_dropdown = gr.Dropdown(["none", "all", "lora_only"], label="lora_bias", info="", value="none", interactive=True) with gr.Group(): gr.Markdown("### &nbsp;2).Dataset",elem_classes="white_background") dataset_source_radio_choices = ["Download From Huggingface Hub", f"From Local HG Dataset In {local_dataset_root_dir})"] dataset_source_radio = gr.Radio(dataset_source_radio_choices, label="Dataset Source", value=dataset_source_radio_choices[1], interactive=True) with gr.Row(equal_height=True): hg_dataset_path_textbox = gr.Textbox(label="Dataset Name:",elem_classes="none_border",visible=False, interactive=True, scale=4, value="iamtarun/python_code_instructions_18k_alpaca") download_local_dataset_btn = gr.Button("Download", scale=1, visible=False) stop_download_local_dataset_btn = gr.Button("Stop", scale=1, visible=False) download_dataset_status_markdown = gr.Markdown('') with gr.Row(): hg_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1,value="train") hg_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1) with gr.Row(): local_dataset_list.pop( local_dataset_list.index(INIT_DATASET_NAME)) local_dataset_list.insert(0, INIT_DATASET_NAME) local_train_path_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Train Dataset", info="", 
value=local_dataset_list[0] if len(local_dataset_list)>0 else None, interactive=True, elem_classes="white_background", scale=5, visible=True) refresh_local_train_path_dataset_list_btn = gr.Button("Refresh", scale=1, visible=True) with gr.Row(): local_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=True, elem_classes="white_background", scale=1,value="train",visible=True) local_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=True, elem_classes="white_background", scale=1,visible=True) with gr.Group(elem_classes="white_background"): # gr.Markdown("<h4><br> &nbsp;&nbsp;Prompt Template: (Prefix1 + ColumnName1 + Prefix2 + ColumnName2)</h4>",elem_classes="white_background") gr.Markdown("<br> &nbsp;&nbsp;&nbsp;&nbsp;**Prompt Template: (Prefix1+ColumnName1+Prefix2+ColumnName2+Prefix3+ColumnName3+Prefix4+ColumnName4)**",elem_classes="white_background") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>",elem_classes="white_background") # using_llama2_chat_template_checkbox = gr.Checkbox(True, label="Using Llama2/Mistral chat template",interactive=True,visible=False) with gr.Row(elem_classes="white_background"): # prompt_template prefix1_textbox = gr.Textbox(label="Prefix1:",value=INIT_PREFIX1,lines=2,interactive=True,elem_classes="white_background") datatset_col1_dropdown = gr.Dropdown(col_names, label="ColumnName1:", info="",value=col_names[1],interactive=True,elem_classes="white_background") prefix2_textbox = gr.Textbox(label="Prefix2:",value=INIT_PREFIX2,lines=2,interactive=True,elem_classes="white_background") datatset_col2_dropdown = gr.Dropdown(col_names, label="ColumnName2:", info="",value=col_names[2],interactive=True,elem_classes="white_background") with gr.Row(elem_classes="white_background"): prefix3_textbox = gr.Textbox(label="Prefix3:",value=INIT_PREFIX3,lines=2,interactive=True,elem_classes="white_background") datatset_col3_dropdown = gr.Dropdown(col_names, label="ColumnName3:", info="",value=col_names[3],interactive=True,elem_classes="white_background") prefix4_textbox = gr.Textbox(label="Prefix4:",value=INIT_PREFIX4,lines=2,interactive=True,elem_classes="white_background") datatset_col4_dropdown = gr.Dropdown(col_names, label="ColumnName4:", info="",value=col_names[0],interactive=True,elem_classes="white_background") # print("") prompt_sample = INIT_PREFIX1 + INIT_COL1_TEXT + INIT_PREFIX2 + INIT_COL2_TEXT + INIT_PREFIX3 + INIT_COL3_TEXT + INIT_PREFIX4 + INIT_COL4_TEXT prompt_sample_textbox = gr.Textbox(label="Prompt Sample:",interactive=False,value=prompt_sample,lines=4) max_length_dropdown = gr.Dropdown(["Model Max Length"]+model_context_window, label="Max Length",value="Model Max Length", interactive=True,allow_custom_value=True) with gr.Group(): gr.Markdown("### &nbsp;3).Training Arguments",elem_classes="white_background") with gr.Row(elem_classes="white_background"): epochs_slider = gr.Slider(1, 100, value=10, step=1, label="Epochs", interactive=True) # epochs_dropdown = gr.Dropdown([1]+[bi for bi in range(10,101,10)], label="Epochs",value=1, interactive=True,allow_custom_value=True) batch_size_list = [1,2,3]+[bi for bi in range(4,32+1,4)] batch_size_slider = gr.Slider(1, 100, value=1, step=1, label="Batch Size", interactive=True) # batch_size_dropdown = gr.Dropdown(batch_size_list,label="Batch Size", info="",value=batch_size_list[0],interactive=True,allow_custom_value=True) # learning_rate_textbox = 
gr.Textbox(label="Learning Rate", value=2e-4,interactive=True) with gr.Row(elem_classes="white_background"): learning_rate_slider = gr.Slider(0, 0.01, value=2e-4, step=0.0001, label="Learning Rate", interactive=True) warmup_steps_slider = gr.Slider(0, 400, value=100, step=10, label="Warmup Steps", interactive=True) with gr.Row(elem_classes="white_background"): optimizer_dropdown = gr.Dropdown(transformer_optimizer_list, label="Optimizer", info="", value=transformer_optimizer_list[1], interactive=True) lr_scheduler_list = ["linear","cosine","cosine_with_hard_restarts","polynomial_decay","constant","constant_with_warmup","inverse_sqrt","reduce_on_plateau"] lr_scheduler_type_dropdown = gr.Dropdown(lr_scheduler_list, label="LR Scheduler Type", info="", value=lr_scheduler_list[0], interactive=True) with gr.Row(elem_classes="white_background"): early_stopping_patience_slider = gr.Slider(0, 50+1, value=0, step=5, label="Early Stopping Patience", interactive=True) gradient_accumulation_steps_slider = gr.Slider(1, 50, value=1, step=1, label="Gradient Accumulation Steps") with gr.Row(elem_classes="white_background"): eval_steps_slider = gr.Slider(0, 1000, value=100, step=100, label="eval_steps", interactive=True) gradient_checkpointing_checkbox = gr.Checkbox(True,label="Gradient Checkpointing",interactive=True) train_btn = gr.Button("Start Training") with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;2.Test",elem_classes="white_background") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file:os.path.getmtime(os.path.join(training_runs_dir,file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir,run_name) run_output_model = os.path.join(run_name_dir,"output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_")>=0: runs_output_model.append(os.path.join(run_name,"output_model",run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[0] if runs_output_model else None, interactive=True) gr.Markdown("") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>", elem_classes="white_background") with gr.Row(): test_input_textbox = gr.Textbox(label="Input:", interactive=True, value="", lines=4, scale=4) generate_text_btn = gr.Button("Generate",scale=1) finetune_test_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) # test_prompt = gr.Textbox(label="Prompt:", interactive=False, lines=2, scale=1) test_output = gr.Textbox(label="Output:", interactive=False,lines=4, scale=1) # def change_test_input_textbox(test_prefix1_textbox,test_input_textbox,test_prefix2_textbox): # return gr.update(value=test_prefix1_textbox+test_input_textbox+test_prefix2_textbox) # test_input_textbox.change(change_test_input_textbox,[test_prefix1_textbox,test_input_textbox,test_prefix2_textbox],test_prompt) with gr.Group(): gr.Markdown("## &nbsp;3.Quantization",elem_classes="white_background") with gr.Row(): quantization_type_list = ["gguf"] quantization_type_dropdown = 
gr.Dropdown(quantization_type_list, label="Quantization Type",value=quantization_type_list[0], interactive=True,scale=3) local_quantization_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Dataset for quantization", value=local_dataset_list[0] if len( local_dataset_list) > 0 else None, interactive=True, elem_classes="white_background", scale=7, visible=False) refresh_local_quantization_dataset_btn = gr.Button("Refresh", scale=2, visible=False) def click_refresh_local_quantization_dataset_btn(): local_dataset_list, _ = get_local_dataset_list() return gr.update(choices=local_dataset_list, value=local_dataset_list[0] if len(local_dataset_list) > 0 else "") refresh_local_quantization_dataset_btn.click(click_refresh_local_quantization_dataset_btn,[],local_quantization_dataset_dropdown) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_") >= 0: runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] quantization_runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[ 0] if runs_output_model else None, interactive=True, scale=6) quantize_btn = gr.Button("Quantize", scale=1,visible=False) if runs_output_model: model_name = runs_output_model[0].split(os.sep)[-2].split('_')[-1] quantized_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.sep.join(runs_output_model[0].split(os.sep)[0:-1]), "quantized_" + quantization_type_list[0] + "_" + model_name) if not os.path.exists(quantized_model_dir): os.makedirs(quantized_model_dir) quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''&nbsp;&nbsp;&nbsp;&nbsp;1.Follow the instructions in the llama.cpp to generate a GGUF:[https://github.com/ggerganov/llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp#prepare-data--run),<span style="color:red">&nbsp;&nbsp;Q4_K_M is recommend</span>''',visible=True) if runs_output_model: gguf_quantization_markdown2 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;2.Convert {runs_output_model[0]} to gguf model",visible=True) else: gguf_quantization_markdown2 = gr.Markdown( f"", visible=True) gguf_quantization_markdown3 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;3.Deploy gguf model", visible=False) else: quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''''',visible=True) gguf_quantization_markdown2 = gr.Markdown(f"",visible=True) gguf_quantization_markdown3 = gr.Markdown(f"", visible=True) with gr.Group(visible=False): gr.Markdown("## &nbsp;4.Deploy",elem_classes="white_background") with gr.Row(): deployment_framework_dropdown = 
gr.Dropdown(["TGI","llama-cpp-python"], label="Deployment Framework",value="TGI", interactive=True) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) # ori_model_runs_output_model = [] tgi_model_format_runs_output_model = [] gguf_model_format_runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: model_bin_path = os.path.exists( os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "ori", "pytorch_model.bin")) if run_output_model_name.find("merged_") >= 0 and model_bin_path: tgi_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) gptq_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs',run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1], "pytorch_model.bin") if os.path.exists(gptq_model_path): tgi_model_format_runs_output_model.append(os.path.join(run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1])) gguf_model_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1]) if os.path.exists(gguf_model_dir): gguf_model_names = os.listdir(gguf_model_dir) for gguf_model_name in gguf_model_names: if gguf_model_name.split('.')[-1] == "gguf": gguf_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1], gguf_model_name)) tgi_model_format_runs_output_model = tgi_model_format_runs_output_model[::-1] gguf_model_format_runs_output_model = gguf_model_format_runs_output_model[::-1] deployment_runs_output_model_dropdown = gr.Dropdown(tgi_model_format_runs_output_model, label="runs_output_model", value=tgi_model_format_runs_output_model[ 0] if tgi_model_format_runs_output_model else None, interactive=True,scale=6) refresh_deployment_runs_output_model_btn = gr.Button("Refresh", scale=1, visible=True) if tgi_model_format_runs_output_model: model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.path.dirname(tgi_model_format_runs_output_model[0])) model_name = os.path.basename(tgi_model_format_runs_output_model[0]) if model_name.rfind("quantized_gptq_") >= 0: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name} --quantize gptq''' else: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name}''' run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value=run_server_value) run_client_value = '''Command-Line Interface(CLI):\ncurl 127.0.0.1:8080/generate -X POST -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'\n\nPython:\nfrom huggingface_hub import InferenceClient 
\nclient = InferenceClient(model="http://127.0.0.1:8080")\noutput = client.text_generation(prompt="What is Deep Learning?",max_new_tokens=512) ''' run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6,scale=1,value=run_client_value) else: run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value="") run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6, scale=1, value="") # deploy_llm_code = gr.Code(code_str, language="shell", lines=5, label="Install Requirements:") install_requirements_value = ''' ### &nbsp;&nbsp; 1.install docker ### &nbsp;&nbsp; 2.Install NVIDIA Container Toolkit <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.1 Configure the repository: </h4> <p> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \ && \ sudo apt-get update </p> <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.2 Install the NVIDIA Container Toolkit packages: </h4> <p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sudo apt-get install -y nvidia-container-toolkit </p> ''' with gr.Accordion("Install Requirements",open=False) as install_requirements_accordion: install_requirements_markdown = gr.Markdown(install_requirements_value) run_llama_cpp_python_code = gr.Code("", language="python", lines=10, label="run_model_using_llama_cpp_python.py",visible=False) # run_script_textbox = gr.Textbox(label="Install Requirements:", interactive=False, scale=1,value=install_requirements_value) #dependencies with gr.TabItem("Tensorboard", id=1) as fdddd: # training_log_markdown = gr.Markdown('',every=mytestfun) with gr.Row(): # training_log_textbox = gr.Textbox(label="logging:",value="", interactive=True, lines=2, scale=1) with gr.Group(): training_log_markdown = gr.Markdown('') stop_training_btn = gr.Button("Stop Training") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names = [run_name for run_name in run_names if os.path.isdir(os.path.join(training_runs_dir,run_name))] run_names.sort(key=lambda f: os.path.getmtime(os.path.join(training_runs_dir, f))) # print("dddddddd:",run_names) with gr.Group(): # with gr.Row(): training_runs_dropdown = gr.Dropdown(run_names, label="Training Runs",value=run_names[0] if run_names else None, interactive=True, scale=1) delete_text_btn = gr.Button("Delete Run", scale=1) iframe = f'<iframe src={TENSORBOARD_URL} style="border:none;height:1024px;width:100%">' tensorboard_html = gr.HTML(iframe) with gr.Tab("RAG"): with gr.Row(): with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf","*.txt","*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file),reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: 
matched_file_name_list.append(os.path.basename(matched_file)) # chat_data_source_radio_choices = ["Chat With Document", # f"Chat With Image"] gr.Markdown("### &nbsp;Chat With Document", elem_classes="white_background") # chat_data_source_radio = gr.Radio(chat_data_source_radio_choices, # label="", # value=chat_data_source_radio_choices[0], # interactive=True) with gr.Row(): rag_data_list_dropdown = gr.Dropdown(matched_file_name_list, label=f"Local Documents In {rag_data_dir}", value=matched_file_name_list[0] if matched_file_name_list else None, interactive=True,scale=4, min_width=1) refresh_rag_data_list_btn = gr.Button("Refresh", scale=1, min_width=1) # if not current_running_model_name: # model_running_status_markdown = gr.Markdown(f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;No modelis running!</span>") # else: # model_running_status_markdown = gr.Markdown(f"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Model is runing:{current_running_model_name}.</span>") def click_refresh_rag_data_list_btn(): rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf", "*.txt", "*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file), reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: matched_file_name_list.append(os.path.basename(matched_file)) return gr.update(choices=matched_file_name_list,value=matched_file_name_list[0] if matched_file_name_list else None) refresh_rag_data_list_btn.click(click_refresh_rag_data_list_btn,[],rag_data_list_dropdown) # def update_model_running_status(): # return gr.update(value=f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{current_running_model_name} is runing!.</span>") # # load_model_btn.click(click_load_model_btn,model_list_dropdown,[model_list_dropdown]).success(update_model_running_status,[],model_running_status_markdown) with gr.Row(): rag_chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): rag_input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=6) rag_generate_btn = gr.Button("Generate", scale=1) rag_stop_btn = gr.Button("Stop", scale=1) # rag_clear_btn = gr.Button("Clear", scale=1) rag_model_running_status_markdown = gr.Markdown( f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) # retrieved_document_chunks_markdown = gr.Markdown( # f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) retrieved_document_chunks_dataframe = gr.Dataframe( headers=["ID", "Chunk"], datatype=["str", "str"], show_label=False, value=None ) with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Group(): gr.Markdown("### &nbsp;&nbsp;1.Chunking", elem_classes="white_background") with gr.Row(): text_splitter_dropdown = gr.Dropdown(["RecursiveCharacterTextSplitter"], label=f"Text Splitter", value="RecursiveCharacterTextSplitter", interactive=True, scale=1, min_width=1) with gr.Row(): chunk_size_slider = gr.Slider(32, 1024, value=256, step=32, label="Chunk Size", interactive=True, scale=1) chunk_overlap_slider = gr.Slider(0, 500, value=20, step=10, 
label="Chunk Overlap", interactive=True) Separators_textbox = gr.Textbox(label="Separators", value='''["\n\n", "\n", ".", " ", ""]''', interactive=True,visible=False) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;2.Vector Store Retriever", elem_classes="white_background") # local_embedding_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"rag","embedding_models") local_embedding_model_names = get_hg_model_names_from_dir(local_embedding_model_dir,"embedding_models") embedding_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_embedding_model_dir})"] embedding_model_source_radio = gr.Radio(embedding_model_source_radio_choices, label="Embedding Model Source", value=embedding_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_embedding_model_names_dropdown = gr.Dropdown(embedding_model_names, label=f"",show_label=False, value=embedding_model_names[0] if embedding_model_names else None, interactive=True, scale=4, min_width=1) download_hub_embedding_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_embedding_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_embedding_model_names_dropdown = gr.Dropdown(local_embedding_model_names, label=f"Embedding Model",show_label=False, value=local_embedding_model_names[0] if local_embedding_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_embedding_model_names_btn = gr.Button("Refresh", scale=1,visible=False) # model_config_path1 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "pytorch_model.bin") # model_config_path2 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "model.safetensors") model_config_path = os.path.join(local_embedding_model_dir, embedding_model_names[0], "config.json") if os.path.exists(model_config_path): download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): search_top_k_slider = gr.Slider(1, 10, value=3, step=1, label="Search Top K", interactive=True) search_score_threshold_slider = gr.Slider(0, 1, value=0.5, step=0.1, label="Search Score Threshold",interactive=True) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;3.Chat Model", elem_classes="white_background") local_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_chat_model_names = get_hg_model_names_from_dir(local_chat_model_dir) local_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_chat_model_dir,runs_model_root_dir) chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_chat_model_dir})"] chat_model_source_radio = gr.Radio(chat_model_source_radio_choices, label="Chat Model source",show_label=False, value=chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model",show_label=False,allow_custom_value=True, value=base_model_names[0] if base_model_names else None, interactive=True, scale=4, min_width=1) download_hub_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_chat_model_names_btn = gr.Button("Stop", scale=1, 
visible=False) local_chat_model_names_dropdown = gr.Dropdown(local_chat_model_names, label=f"Chat Model",show_label=False, value=local_chat_model_names[0] if local_chat_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_chat_model_names_btn = gr.Button("Refresh", scale=1,visible=False) rag_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Tab("Setting"): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Row(): max_new_tokens_slider = gr.Slider(1, 4096, value=256, step=0.1, label="Max New Tokens", interactive=True) temperature_slider = gr.Slider(0, 5, value=1, step=0.1, label="Temperature", interactive=True) with gr.Row(): top_k_slider = gr.Slider(1, 100, value=50, step=1, label="Top_k", interactive=True) top_p_slider = gr.Slider(0, 1, value=1, step=0.1, label="Top_p", interactive=True) with gr.Row(): repeat_penalty_slider = gr.Slider(1, 5, value=1, step=0.1, label="Repeat Penalty", interactive=True) with gr.Row(): chat_history_window_slider = gr.Slider(1, 20, value=3, step=1, label="Chat History Window", interactive=True) low_cpu_mem_usage_checkbox = gr.Checkbox(False, label="Low Cpu Mem Usage",interactive=True,visible=False) Huggingface_hub_token = gr.Textbox(label="Huggingface Hub Token", value="") def check_local_model_or_dataset_is_empty1(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty2(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty3(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty4(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty5(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def download_hub_home_chat_model_postprocess(): return gr.update(visible=True), gr.update(visible=False) def click_download_hub_home_chat_model_btn(): return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True) def click_stop_download_hub_home_chat_model_names_btn(): 
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def click_stop_download_hub_home_chat_model_names_btn(): return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def change_home_chat_model_source_radio(home_chat_model_source_radio, hub_home_chat_model_names_dropdown): local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") if home_chat_model_source_radio == "Download From Huggingface Hub": if not hub_home_chat_model_names_dropdown: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>' else: if validate_model_path(hub_home_chat_model_names_dropdown)[0]: model_download_status = '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>' else: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>' return gr.update(visible=True), gr.update(visible=False), gr.update( visible=False), gr.update(visible=True, value=model_download_status), gr.update( visible=True), gr.update( visible=False) else: model_download_status = "" return gr.update(visible=False), gr.update(visible=True), gr.update( visible=True), gr.update(visible=False, value=model_download_status), gr.update( visible=False), gr.update( visible=False) click_download_hub_home_chat_model_names_btn_event = download_hub_home_chat_model_names_btn.click( check_local_model_or_dataset_is_empty1, [hub_home_chat_model_names_dropdown,Huggingface_hub_token]).success( click_download_hub_home_chat_model_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown]).then( download_model_wrapper, [hub_home_chat_model_names_dropdown, local_home_chat_model_root_dir_textbox], download_hub_home_chat_model_status_markdown). 
\ then(download_hub_home_chat_model_postprocess, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn]) stop_download_hub_home_chat_model_names_btn.click(click_stop_download_hub_home_chat_model_names_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown], cancels=[ click_download_hub_home_chat_model_names_btn_event]) home_chat_model_source_radio.change(change_home_chat_model_source_radio, [home_chat_model_source_radio, hub_home_chat_model_names_dropdown], [hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, refresh_local_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def change_refresh_local_home_chat_model_names_btn(): local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) return gr.update(choices=local_home_chat_model_names,value = local_home_chat_model_names[0] if local_home_chat_model_names else None) refresh_local_home_chat_model_names_btn.click(change_refresh_local_home_chat_model_names_btn,[],[local_home_chat_model_names_dropdown]) def change_hub_home_chat_model_names_dropdown(hub_home_chat_model_names_dropdown): if not hub_home_chat_model_names_dropdown: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>'), \ gr.update(visible=True), gr.update(visible=False) if validate_model_path(hub_home_chat_model_names_dropdown)[0]: return gr.update( visible=True, value='<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>'), \ gr.update(visible=True), gr.update(visible=False) else: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>'), \ gr.update(visible=True), gr.update(visible=False) hub_home_chat_model_names_dropdown.change(change_hub_home_chat_model_names_dropdown, hub_home_chat_model_names_dropdown, [download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def click_load_home_chat_model_btn(home_chat_model_source_radio, hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, max_new_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repeat_penalty_slider, chat_history_window_slider,using_4bit_quantization_checkbox,low_cpu_mem_usage_checkbox, progress=gr.Progress()): if home_chat_model_source_radio == "Download From Huggingface Hub": cur_model_name = hub_home_chat_model_names_dropdown else: cur_model_name = local_home_chat_model_names_dropdown if not validate_model_path(cur_model_name)[0]: raise gr.Error(f"Model does not exist!") global infer_model global stop_generation_status stop_generation_status = True progress(0.6) if infer_model: infer_model.free_memory() infer_model = None torch.cuda.empty_cache() yield "Loading model ..." 
load_model_status = 0 model_path = validate_model_path(cur_model_name)[1] if model_path.split('.')[-1] == "gguf": infer_model = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens_slider, temperature=temperature_slider, top_k=top_k_slider, top_p=top_p_slider, repetition_penalty=repeat_penalty_slider) load_model_status, msg = infer_model.load_model() else: infer_model = HuggingfaceInference(model_path=model_path, max_new_tokens=max_new_tokens_slider, temperature=temperature_slider, top_k=top_k_slider, top_p=top_p_slider, repetition_penalty=repeat_penalty_slider, using_4bit_quantization=using_4bit_quantization_checkbox, low_cpu_mem_usage=low_cpu_mem_usage_checkbox) load_model_status, msg = infer_model.load_model() if load_model_status == -1: raise gr.Error(f"Loading model error:{msg}") if infer_model: infer_model.free_memory() infer_model = None torch.cuda.empty_cache() return progress(1.0) return gr.update() def update_model_running_status(): global chatbot_history return gr.update(visible=True, value=f"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Model is runing ...</span>"),chatbot_history,gr.update() def show_model_running_status(): return gr.update(visible=True) load_home_chat_model_btn.click(show_model_running_status, [], download_hub_home_chat_model_status_markdown).then( click_load_home_chat_model_btn, [home_chat_model_source_radio, hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, max_new_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repeat_penalty_slider, chat_history_window_slider,using_4bit_quantization_checkbox,low_cpu_mem_usage_checkbox], [download_hub_home_chat_model_status_markdown]). \ success(update_model_running_status, [], [download_hub_home_chat_model_status_markdown,chatbot,input_txtbox]) def click_stop_btn(): global stop_generation_status stop_generation_status = True def clear_chat_history(): global chatbot_history, stop_generation_status stop_generation_status = True chatbot_history = [] return gr.update(value=None) def show_chatbot_question1(text): global chatbot_history if not text: raise gr.Error('Enter text') chatbot_history = chatbot_history + [[text, '']] chatbot_history = chatbot_history[-5:] return chatbot_history def show_chatbot_question2(text): global chatbot_history if not text: raise gr.Error('Enter text') chatbot_history = chatbot_history + [[text, '']] chatbot_history = chatbot_history[-5:] return chatbot_history # input_txtbox.submit(add_text) def generate_btn_click1(input_txtbox): global chatbot_history, infer_model, stop_generation_status chatbot_history_np = np.asarray(chatbot_history) chatbot_history_np = chatbot_history_np.flatten() chatbot_history_list = chatbot_history_np.tolist() stop_generation_status = False model_type = "other model" if infer_model: model_type = get_model_type(infer_model.model_path) prompt = get_chat_history_prompt(chatbot_history_list, model_type) print(f"{model_type} input prompt:", prompt) answer = infer_model(prompt) else: raise gr.Error("Model is not loaded!") return chatbot_history,gr.update(value="") print(f"{model_type} output:", answer) for char in answer: if stop_generation_status: break try: chatbot_history[-1][-1] += char except: break time.sleep(0.05) # print("d2:",chatbot_history) yield chatbot_history,gr.update(value="") yield chatbot_history,gr.update(value="") def generate_btn_click2(input_txtbox): global chatbot_history, infer_model, stop_generation_status chatbot_history_np = np.asarray(chatbot_history) chatbot_history_np = 
chatbot_history_np.flatten() chatbot_history_list = chatbot_history_np.tolist() stop_generation_status = False running_model_name = "other model" if infer_model: if infer_model.model_path.lower().find("mistral") >= 0 and infer_model.model_path.lower().find( "instruct") >= 0: running_model_name = "mistral" prompt = get_chat_history_prompt(chatbot_history_list, running_model_name) elif infer_model.model_path.lower().find("llama") >= 0 and infer_model.model_path.lower().find("chat") >= 0: running_model_name = "llama2" prompt = get_chat_history_prompt(chatbot_history_list, running_model_name) elif infer_model.model_path.lower().find("zephyr") >= 0: running_model_name = "zephyr" prompt = get_chat_history_prompt(chatbot_history_list, running_model_name) else: prompt = ','.join(chatbot_history_list[:-2]) prompt = prompt + chatbot_history_list[-2] print(f"{running_model_name} input prompt:", prompt) answer = infer_model(prompt) else: raise gr.Error("Model is not loaded!") return chatbot_history,gr.update(value="") print(f"{running_model_name} output:", answer) for char in answer: if stop_generation_status: break try: chatbot_history[-1][-1] += char except: break time.sleep(0.05) # print("d2:",chatbot_history) yield chatbot_history,gr.update(value="") yield chatbot_history,gr.update(value="") def generate_btn_click_clear_text1(): return gr.update(value="") def generate_btn_click_clear_text2(): return gr.update(value="") input_txtbox.submit(show_chatbot_question1, inputs=[input_txtbox], outputs=[chatbot], queue=False). \ success(generate_btn_click1, inputs=[input_txtbox], outputs=[chatbot,input_txtbox]) generate_btn.click(show_chatbot_question2, inputs=[input_txtbox], outputs=[chatbot], queue=False). \ success(generate_btn_click2, inputs=[input_txtbox], outputs=[chatbot,input_txtbox]) # clear_btn.click(clear_chat_history, [], chatbot) stop_btn.click(click_stop_btn) ########################## ###################### def click_delete_text_btn(training_runs_dropdown): delete_run_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', training_runs_dropdown) if os.path.exists(delete_run_dir): shutil.rmtree(delete_run_dir) training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names = [run_name for run_name in run_names if os.path.isdir(os.path.join(training_runs_dir, run_name))] run_names.sort(key=lambda f: os.path.getmtime(os.path.join(training_runs_dir, f))) sss = np.random.randint(0, 100) + 1000 iframe = f'<iframe src={TENSORBOARD_URL} style="border:none;height:{sss}px;width:100%">' return gr.update(choices=run_names, value=run_names[0] if run_names else ""), gr.update(value=iframe) def click_stop_training_btn(): global stop_training
if TRAINING_STATUS.status == 0:
16
2023-11-25 12:37:21+00:00
16k
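The chat-generation callbacks in the record above (generate_btn_click1/2) stream the reply back to the Gradio chatbot one character at a time from a generator function. A minimal, self-contained sketch of that streaming pattern, assuming a Gradio version that accepts list-of-pairs chat history and using a hard-coded placeholder answer in place of the repository's infer_model:

```python
# Minimal sketch of the character-streaming chat pattern seen above.
# The reply string is a placeholder; the original code calls infer_model(prompt).
import time
import gradio as gr

def respond(user_message, history):
    history = (history or []) + [[user_message, ""]]
    answer = "This is a placeholder reply."  # stand-in for the model output
    for ch in answer:
        history[-1][1] += ch
        time.sleep(0.05)
        yield history, ""  # stream the partial reply and clear the textbox

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    textbox = gr.Textbox()
    textbox.submit(respond, [textbox, chatbot], [chatbot, textbox])

if __name__ == "__main__":
    demo.launch()
```

Because the callback is a generator, Gradio re-renders the chatbot on every yield, which is what produces the incremental typing effect in the record's code.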
danilonumeroso/conar
models/vkc_reasoner.py
[ { "identifier": "AlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class AlgorithmReasoner(nn.Module):\n @staticmethod\n def prepare_batch(batch):\n batch = batch.clone()\n for name, tensor in batch.items():\n if not torch.is_tensor(tensor):\n continue\n if name.endswith('_temporal') and 'index' not in name:\n tensor = tensor.transpose(1, 0)\n batch[name] = tensor\n return batch\n\n @staticmethod\n def get_masks(train, batch, continue_logits, enforced_mask):\n mask = continue_logits[batch.batch] > 0\n mask_cp = (continue_logits > 0.0).bool()\n mask_edges = mask[batch.edge_index[0]]\n if not train and enforced_mask is not None:\n enforced_mask_ids = enforced_mask[batch.batch]\n mask &= enforced_mask_ids\n mask_cp &= enforced_mask\n return mask_cp, mask, mask_edges\n\n def add_encoder(self, stage, name, loc, data_type, data_sample, bias):\n if name == 'adj': # we use edge indices\n return\n if data_type == Type.SCALAR or data_type == Type.MASK or data_type == Type.MASK_ONE:\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n self.encoders[stage][name] = nn.Linear(in_shape, self.latent_features, bias=bias)\n\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are 1-hot encoded on the edges\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.encoders[stage][name] = nn.ModuleList([\n nn.Linear(1, self.latent_features, bias=bias),\n nn.Linear(1, self.latent_features, bias=bias)\n ])\n\n def add_decoder(self, stage, name, loc, data_type, data_sample, bias):\n assert name != 'adj', 'Adjacency matrix should not be decoded'\n dec = None\n if loc == Location.NODE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.Linear(2*self.latent_features, 1, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.Linear(2*self.latent_features, in_shape, bias=bias)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are decoded from both node and edge information\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if loc == Location.GRAPH:\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n in_shape = data_sample.shape[-1] if data_type == Type.CATEGORICAL else 1\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n\n if loc == Location.EDGE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n if data_type == Type.POINTER:\n dec = nn.ModuleList([\n 
nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n assert dec is not None, breakpoint()\n self.decoders[stage][name] = dec\n\n\n\n\n def __init__(self,\n spec,\n data,\n latent_features,\n algo_processor,\n bias=True,\n use_TF=False,\n use_sinkhorn=True,\n L1_loss=False,\n xavier_on_scalars=True,\n global_termination_pool='max', #'predinet',\n get_attention=False,\n use_batch_norm=False,\n transferring=False,\n timeit=True,\n **kwargs):\n\n super().__init__()\n self.step_idx = 0\n self.latent_features = latent_features\n self.assert_checks = False\n self.timeit = timeit\n self.debug = False\n self.debug_epoch_threshold = 1e9\n self.L1_loss = L1_loss\n self.global_termination_pool = global_termination_pool\n self.next_step_pool = True\n self.processor = algo_processor\n self.triplet_reasoning = False\n if isinstance(self.processor.processors[0].processor, TripletMPNN):\n self.triplet_reasoning = True\n self.triplet_reductor = nn.Linear(2*latent_features, latent_features, bias=bias)\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.get_attention = get_attention\n self.lambda_mul = 1 # 0.0001\n self.transferring = transferring\n self.node_encoder = nn.Sequential(\n nn.Linear(2*latent_features, latent_features, bias=bias),\n )\n self.encoders = nn.ModuleDict({\n 'input': nn.ModuleDict({\n }),\n 'hint': nn.ModuleDict({\n }),\n })\n self.decoders = nn.ModuleDict({\n 'hint': nn.ModuleDict({\n }),\n 'output': nn.ModuleDict({\n })\n })\n for name, (stage, loc, datatype) in spec.items():\n if name == 'adj': # we use edge indices\n continue\n if stage == 'input':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'output':\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'hint':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n\n self.node_pointer_vec = nn.Parameter(torch.randn(latent_features))\n if xavier_on_scalars:\n assert False, \"NEEDS REFACTORING\"\n torch.nn.init.trunc_normal_(self.encoders['input']['edge_attr'].weight, std=1/torch.sqrt(torch.tensor(latent_features)))\n\n if global_termination_pool == 'attention':\n inp_dim = latent_features\n self.global_attn = GlobalAttentionPlusCoef(\n nn.Sequential(\n nn.Linear(inp_dim, latent_features, bias=bias),\n nn.LeakyReLU(),\n nn.Linear(latent_features, 1, bias=bias)\n ),\n nn=None)\n\n if global_termination_pool == 'predinet':\n lf = latent_features\n self.predinet = PrediNet(lf, 1, lf, lf, flatten_pooling=torch_geometric.nn.glob.global_max_pool)\n\n self.termination_network = nn.Sequential(\n nn.BatchNorm1d(latent_features) if use_batch_norm else nn.Identity(),\n nn.Linear(latent_features, 1, bias=bias),\n )\n\n def get_continue_logits(self, batch_ids, latent_nodes, sth_else=None):\n if self.global_termination_pool == 'mean':\n graph_latent = torch_geometric.nn.global_mean_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'max':\n graph_latent = torch_geometric.nn.global_max_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'attention':\n graph_latent, coef = self.global_attn(latent_nodes, batch_ids)\n if self.get_attention:\n 
self.attentions[self.step_idx] = coef.clone().detach()\n self.per_step_latent[self.step_idx] = sth_else\n\n if self.global_termination_pool == 'predinet':\n assert not torch.isnan(latent_nodes).any()\n graph_latent = self.predinet(latent_nodes, batch_ids)\n\n if self.get_attention:\n self.attentions[self.step_idx] = latent_nodes\n continue_logits = self.termination_network(graph_latent).view(-1)\n return continue_logits\n\n def zero_termination(self):\n self.true_positive = 0\n self.false_positive = 0\n self.false_negative = 0\n self.true_negative = 0\n\n def zero_steps(self):\n self.sum_of_processed_nodes = 0\n self.sum_of_processed_edges = 0\n self.step_idx = 0\n self.sum_of_steps = 0\n self.cnt = 0\n\n @staticmethod\n def convert_logits_to_outputs(spec,\n logits,\n fr,\n to,\n num_nodes,\n batch_ids,\n include_probabilities=True,\n dbg=False):\n outs = defaultdict(dict)\n\n for stage in logits.keys():\n for name in logits[stage].keys():\n if name not in logits[stage] or name not in spec:\n continue\n stage, loc, data_type = spec[name]\n assert stage != Stage.INPUT\n if data_type == Type.SOFT_POINTER:\n assert False, f\"Not yet added, please add {name}\"\n if data_type in [Type.CATEGORICAL]:\n indices = logits[stage][name].argmax(-1)\n outshape = logits[stage][name].shape[-1]\n outs[stage][name] = F.one_hot(indices, num_classes=outshape).float()\n if data_type == Type.MASK_ONE:\n _, amax = torch_scatter.scatter_max(logits[stage][name], batch_ids, dim=0)\n amax = amax.squeeze(-1)\n outs[stage][name] = torch.zeros_like(logits[stage][name])\n outs[stage][name][amax] = 1\n if data_type == Type.MASK:\n outs[stage][name] = (logits[stage][name] > 0).float()\n if data_type == Type.SCALAR:\n outs[stage][name] = logits[stage][name]\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n _, pointers = torch_scatter.scatter_max(pointer_logits, fr, dim_size=num_nodes)\n pointers = to[pointers]\n pointer_probabilities = torch_geometric.utils.softmax(pointer_logits, fr, num_nodes=num_nodes)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n pointers = pointer_logits.argmax(-1)\n pointer_probabilities = F.softmax(pointer_logits, dim=-1)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n return outs\n\n def set_initial_states(self, batch, init_last_latent=None):\n self.processor.zero_lstm(batch.num_nodes) # NO-OP if processor(s) don't use LSTM\n self.last_latent = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n if init_last_latent is not None:\n self.last_latent = init_last_latent\n self.last_latent_edges = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n self.last_continue_logits = torch.ones(batch.num_graphs, device=batch.edge_index.device)\n self.last_logits = defaultdict(dict)\n\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n if name not in self.decoders[stage]:\n continue\n if stage == Stage.OUTPUT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n if data_type == Type.SCALAR:\n 
self.last_logits[stage][name] = getattr(batch, name).unsqueeze(-1)\n if data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name).int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.last_logits[stage][name] = torch.full((batch.edge_index.shape[1], int(ptrs.max().item())+1), -1e9).to(batch.edge_index.device)\n self.last_logits[stage][name][torch.arange(ptrs.shape[0]), ptrs] = 1e9\n else:\n assert False, breakpoint()\n\n if stage == Stage.HINT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0].unsqueeze(-1)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n else:\n assert False, breakpoint()\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0, :].unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name)[0, :].int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.max_nodes_in_graph = int(ptrs.max().item())+1 # FIXME try another way to infer\n self.last_logits[stage][name] = torch.where(edge_one_hot_encode_pointers_edge(ptrs, batch, self.max_nodes_in_graph).bool(), 1e9, -1e9).to(batch.edge_index.device)\n else:\n assert False, breakpoint()\n\n self.all_hint_logits = []\n self.all_masks_graph = []\n\n def update_per_mask(self, before, after, mask=None):\n # NOTE: this does expansion of the mask, if you do\n # NOT use expansion, use torch.where\n if mask is None:\n mask = self.mask\n mask = mask.unsqueeze(-1).expand_as(before)\n return torch.where(mask, after, before)\n\n def update_state_dict(self, before, after):\n new_before = defaultdict(dict)\n for stage in after.keys():\n for name in after[stage].keys():\n _, loc, data_type = self.dataset_spec[name]\n if loc == Location.GRAPH:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_cp)\n if loc == Location.EDGE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL, 
Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_edges)\n else:\n assert False, \"Please implement\"\n if loc == Location.NODE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name])\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = torch.where(self.mask_edges, after[stage][name], before[stage][name])\n else:\n assert False, breakpoint()\n return new_before\n\n def update_states(self, batch, current_latent, edges_current_latent,\n logits, continue_logits):\n self.last_continue_logits = torch.where(self.mask_cp, continue_logits,\n self.last_continue_logits)\n self.last_latent = self.update_per_mask(self.last_latent, current_latent)\n self.last_latent_edges = self.update_per_mask(self.last_latent_edges, edges_current_latent, mask=self.mask_edges)\n self.last_logits = self.update_state_dict(self.last_logits, logits)\n self.all_hint_logits.append(self.last_logits['hint'])\n self.all_masks_graph.append(self.mask_cp)\n preds = type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)\n self.last_hint = preds['hint']\n self.last_output = preds['output']\n\n def prepare_initial_masks(self, batch):\n self.mask = torch.ones_like(batch.batch, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_cp = torch.ones(batch.num_graphs, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_edges = torch.ones_like(batch.edge_index[0], dtype=torch.bool, device=batch.edge_index.device)\n\n def loop_condition(self, termination, STEPS_SIZE):\n return (((not self.training and termination.any()) or\n (self.training and termination.any())) and\n self.step_idx+1 < STEPS_SIZE)\n\n def loop_body(self,\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=1000):\n\n current_latent, edges_current_latent, preds, continue_logits =\\\n self.forward(\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n first_n_processors=first_n_processors,\n )\n termination = continue_logits\n\n self.debug_batch = batch\n self.debug_hint_out_curr = hint_out_curr\n if self.timeit:\n st = time.time()\n self.update_states(batch, current_latent, edges_current_latent, preds, termination)\n if self.timeit:\n print(f'updating states: {time.time()-st}')\n\n def get_step_input(self, x_curr, batch):\n if self.training and self.use_TF or self.hardcode_outputs:\n return x_curr\n return type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)['hint']\n\n def encode_inputs(self, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.INPUT:\n continue\n if name not in self.encoders[stage]:\n continue\n data = getattr(batch, name)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n assert False, breakpoint() # we don't have it for now (B-F/MST), 
will figure out later\n if data_type != Type.CATEGORICAL:\n data = data.unsqueeze(-1)\n if loc == Location.EDGE:\n edge_fts += self.encoders[stage][name](data)\n if loc == Location.NODE:\n node_fts += self.encoders[stage][name](data)\n return node_fts, edge_fts\n\n def encode_hints(self, hints, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n graph_fts = torch.zeros(batch.num_graphs, self.latent_features, device=batch.edge_index.device)\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n if name not in self.encoders[stage]:\n continue\n hint = hints[name]\n if loc == Location.NODE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n node_fts = node_fts + self.encoders['hint'][name](hint)\n if loc == Location.EDGE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n edge_fts = edge_fts + self.encoders['hint'][name](hint)\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(hint, batch.edge_index)\n edge_fts = edge_fts + self.encoders['hint'][name](pred_gt_one_hot.unsqueeze(-1))\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers_edge(hint, batch, self.max_nodes_in_graph)\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n encoding = self.encoders['hint'][name][0](pred_gt_one_hot.unsqueeze(-1))\n encoding_2 = self.encoders['hint'][name][1](pred_gt_one_hot.unsqueeze(-1))\n encoding_sparse = SparseTensor(row=batch.edge_index[0], col=batch.edge_index[1], value=encoding)\n res_1 = encoding_sparse.mean(1)[batch.edge_index[0], batch.edge_index[1]-starts_edge]\n res_2 = encoding_2.mean(1)\n edge_fts += res_1 + res_2 # INPLACE\n if loc == Location.GRAPH and data_type in [Type.CATEGORICAL, Type.SCALAR, Type.MASK]:\n graph_fts = graph_fts + self.encoders['hint'][name](hint)\n return node_fts, edge_fts, graph_fts\n\n def get_input_output_hints(self, batch):\n hint_inp_curr = {}\n hint_out_curr = {}\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n hint_inp_curr[name] = getattr(batch, name)[self.step_idx]\n hint_out_curr[name] = getattr(batch, name)[self.step_idx+1]\n if 'mask' in data_type or data_type == Type.SCALAR:\n hint_inp_curr[name] = hint_inp_curr[name].unsqueeze(-1)\n hint_out_curr[name] = hint_out_curr[name].unsqueeze(-1)\n return hint_inp_curr, hint_out_curr\n\n def process(\n self,\n batch,\n EPSILON=0,\n enforced_mask=None,\n hardcode_outputs=False,\n debug=False,\n first_n_processors=1000,\n init_last_latent=None,\n **kwargs):\n\n SIZE, STEPS_SIZE = prepare_constants(batch)\n self.hardcode_outputs = hardcode_outputs\n\n # Pytorch Geometric batches along the node dimension, but we execute\n # along the temporal (step) dimension, hence we need to transpose\n # a few tensors. 
Done by `prepare_batch`.\n if self.assert_checks:\n check_edge_index_sorted(batch.edge_index)\n if self.epoch > self.debug_epoch_threshold:\n breakpoint()\n self.zero_steps()\n batch = type(self).prepare_batch(batch)\n # When we want to calculate last step metrics/accuracies\n # we need to take into account again different termination per graph\n # hence we save last step tensors (e.g. outputs) into their\n # corresponding tensor. The function below prepares these tensors\n # (all set to zeros, except masking for computation, which are ones)\n self.set_initial_states(batch, init_last_latent=init_last_latent)\n # Prepare masking tensors (each graph does at least 1 iteration of the algo)\n self.prepare_initial_masks(batch)\n # A flag if we had a wrong graph in the batch. Used for visualisation\n # of what went wrong\n self.wrong_flag = False\n assert self.mask_cp.all(), self.mask_cp\n if self.timeit:\n st = time.time()\n node_fts_inp, edge_fts_inp = self.encode_inputs(batch)\n if self.timeit:\n print(f'encoding inputs: {time.time()-st}')\n\n while True:\n hint_inp_curr, hint_out_curr = self.get_input_output_hints(batch)\n if not self.training:\n assert (self.last_continue_logits > 0).any() or True\n\n # Some algorithms output fewer values than they take\n # so if we reuse our last step outputs, they need to be fed back in.\n if self.timeit:\n st = time.time()\n hint_inp_curr = self.get_step_input(hint_inp_curr, batch)\n if self.timeit:\n print(f'getting step input : {time.time()-st}')\n st = time.time()\n node_fts_hint, edge_fts_hint, graph_fts = self.encode_hints(hint_inp_curr, batch)\n node_fts = node_fts_inp + node_fts_hint\n edge_fts = edge_fts_inp + edge_fts_hint\n if self.timeit:\n print(f'encoding hints: {time.time()-st}')\n\n true_termination = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n\n # Does one iteration of the algo and accumulates statistics\n self.loop_body(batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=first_n_processors)\n # And calculate what graphs would execute on the next step.\n self.mask_cp, self.mask, self.mask_edges = type(self).get_masks(self.training, batch, true_termination if self.training else self.last_continue_logits, enforced_mask)\n if not self.loop_condition(\n self.mask_cp,\n STEPS_SIZE):\n break\n assert self.mask_cp.any()\n self.step_idx += 1\n\n return self.all_hint_logits, self.last_logits, self.all_masks_graph\n\n def decode(self, batch, encoded_nodes, hidden, edge_fts, graph_fts):\n catted = torch.cat((encoded_nodes, hidden), dim=1)\n outs = defaultdict(dict)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n\n if loc == Location.NODE:\n\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name](catted)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n prod = self.decoders[stage][name][3](to.max(fr+edge)).squeeze(-1)\n if data_type in [Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION] and self.use_sinkhorn:\n prod = torch.maximum(prod, self.decoders[stage][name][3](fr.max(to+edge)).squeeze(-1))\n prod = sinkhorn_normalize(batch, prod, temperature=0.1, steps=10 if self.training else 60, add_noise=self.training)\n 
outs[stage][name] = prod\n\n if loc == Location.GRAPH:\n aggr_node_fts = torch_scatter.scatter_max(catted, batch.batch, dim=0)[0]\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name][0](aggr_node_fts) + self.decoders[stage][name][1](graph_fts)\n else:\n assert False\n\n if loc == Location.EDGE:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n if data_type in (Type.CATEGORICAL, Type.MASK, Type.SCALAR):\n outs[stage][name] = fr + to + edge\n elif data_type == Type.POINTER:\n pred = fr + to + edge\n pred_2 = self.decoders[stage][name][3](catted)\n ebatch = batch.edge_index_batch\n st = batch.ptr[ebatch]\n en = batch.ptr[ebatch+1]\n dense_pred_2, mask_pred_2 = tg_utils.to_dense_batch(pred_2, batch=batch.batch)\n edge_pred_2 = dense_pred_2[ebatch]\n mask_edge_pred_2 = mask_pred_2[ebatch]\n probs_logits = self.decoders[stage][name][4](torch.maximum(pred[:, None, :], edge_pred_2)).squeeze(-1)\n probs_logits[~mask_edge_pred_2] = -1e9\n outs[stage][name] = probs_logits\n else:\n assert False\n\n return outs\n\n def encode_nodes(self, current_input, last_latent):\n return torch.cat((current_input, last_latent), dim=1)\n\n def forward(self, batch, node_fts, edge_fts, graph_fts, first_n_processors=1000):\n if torch.isnan(node_fts).any():\n breakpoint()\n assert not torch.isnan(self.last_latent).any()\n assert not torch.isnan(node_fts).any()\n if self.timeit:\n st = time.time()\n if self.timeit:\n print(f'projecting nodes: {time.time()-st}')\n\n if self.timeit:\n st = time.time()\n edge_index = batch.edge_index\n hidden, edges_hidden = self.processor(node_fts, edge_fts, graph_fts, edge_index, self.last_latent, self.last_latent_edges, first_n_processors=first_n_processors, batch=batch)\n if self.timeit:\n print(f'message passing: {time.time()-st}')\n assert not torch.isnan(hidden).any()\n if self.timeit:\n st = time.time()\n if self.triplet_reasoning:\n edge_fts = self.triplet_reductor(torch.cat([edge_fts, edges_hidden], dim=-1))\n outs = self.decode(batch, node_fts, hidden, edge_fts, graph_fts)\n if self.timeit:\n print(f'decoding hints: {time.time()-st}')\n continue_logits = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n return hidden, edges_hidden, outs, continue_logits" }, { "identifier": "LitAlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class LitAlgorithmReasoner(pl.LightningModule):\n def __init__(self,\n hidden_dim,\n algo_processor,\n dataset_class,\n dataset_root,\n dataset_kwargs,\n algorithm='mst_prim',\n update_edges_hidden=False,\n use_TF=False,\n use_sinkhorn=True,\n xavier_on_scalars=True,\n learning_rate=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay'],\n test_with_val=False,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n **algorithm_base_kwargs):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.dataset_class = dataset_class\n self.dataset_root = dataset_root\n self.dataset_kwargs = dataset_kwargs\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.timeit = False\n self.update_edges_hidden = update_edges_hidden\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.algorithm = algorithm\n self.xavier_on_scalars = xavier_on_scalars\n 
self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self._datasets = {}\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self._current_epoch = 0\n self.load_dataset('train')\n\n self.algorithm_module = AlgorithmReasoner(self.dataset.spec,\n self.dataset[0],\n hidden_dim,\n algo_processor,\n update_edges_hidden=update_edges_hidden,\n use_TF=use_TF,\n use_sinkhorn=use_sinkhorn,\n timeit=self.timeit,\n xavier_on_scalars=xavier_on_scalars,\n **algorithm_base_kwargs)\n self.save_hyperparameters(ignore=['algo_processor'])\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch in the ``Trainer``, or 0 if not attached.\"\"\"\n return self.trainer.current_epoch if self._trainer else self._current_epoch\n\n @current_epoch.setter\n def current_epoch(self, epoch) -> int:\n self._current_epoch = epoch\n\n def prepare_for_transfer(self):\n algo_processor = copy.deepcopy(self.algorithm_module.processor)\n self.algorithm_module = AlgorithmReasoner(self.hidden_dim,\n self.node_features,\n self.edge_features,\n self.output_features,\n algo_processor,\n use_TF=False,\n timeit=self.timeit,\n **self.algorithm_base_kwargs)\n for p in self.algorithm_module.processor.parameters():\n p.requires_grad = False\n\n @staticmethod\n def pointer_loss(predecessor_pred, predecessor_gt_edge_1h,\n softmax_idx, num_nodes):\n loss_unreduced = cross_entropy(predecessor_pred, softmax_idx, predecessor_gt_edge_1h, num_nodes)\n sum_loss = loss_unreduced.flatten().sum()\n cnt_loss = predecessor_gt_edge_1h.count_nonzero()\n return sum_loss / cnt_loss\n\n def single_prediction_loss(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n loss = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[graph_mask], pred_gt[graph_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n\n if loc == Location.NODE:\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(pred_gt, batch.edge_index)\n loss = type(self).pointer_loss(\n pred[edge_mask],\n pred_gt_one_hot[edge_mask],\n batch.edge_index[0][edge_mask], batch.num_nodes)\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.MASK_ONE:\n lsms = torch_scatter.scatter_log_softmax(pred[node_mask], batch.batch[node_mask].unsqueeze(-1), dim=0)\n loss = (-lsms[(pred_gt[node_mask] == 1.)]).mean()\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[node_mask], pred_gt[node_mask].argmax(-1))\n if loc == Location.EDGE:\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[edge_mask], pred_gt[edge_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type in [Type.POINTER, 
Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n loss = F.cross_entropy(\n pred[edge_mask],\n pred_gt[edge_mask])\n assert loss is not None, f'{stage}/{name}/{loc}/{data_type}'\n return loss\n\n def get_step_loss(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n if self.timeit:\n st = time.time()\n batch = self.algorithm_module.prepare_batch(batch)\n losses_dict = defaultdict(list)\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n assert graph_mask.any()\n for name in pred:\n stage, loc, data_type = self.dataset.spec[name]\n pred_gt = getattr(batch, name)[i+1]\n losses_dict[name].append(\n self.single_prediction_loss(name, pred[name], pred_gt,\n batch, graph_mask, node_mask,\n edge_mask))\n\n for name in output_logits:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n losses_dict[name].append(\n self.single_prediction_loss(name, output_logits[name],\n getattr(batch, name), batch,\n graph_mask, node_mask, edge_mask))\n\n for k, v in losses_dict.items():\n losses_dict[k] = torch.stack(v).mean()\n if self.timeit:\n print(f'loss calculation: {time.time()-st}')\n input()\n\n return losses_dict\n\n def single_prediction_acc(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n acc = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.NODE:\n if data_type == Type.MASK_ONE:\n # try:\n acc = (pred[node_mask].squeeze(-1).nonzero() == pred_gt[node_mask].nonzero()).float().mean()\n # except Exception as e:\n # breakpoint()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION, Type.MASK]:\n acc = (pred[node_mask].squeeze(-1) == pred_gt[node_mask]).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[node_mask].squeeze(-1) - pred_gt[node_mask])**2).mean()\n if data_type == Type.CATEGORICAL:\n acc = (pred[node_mask].argmax(-1) == pred_gt[node_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[node_mask].squeeze(-1), pred_gt[node_mask])\n\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n acc = (pred[graph_mask].argmax(-1) == pred_gt[graph_mask].argmax(-1)).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[graph_mask].squeeze(-1) - pred_gt[graph_mask])**2).mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[graph_mask].squeeze(-1), pred_gt[graph_mask])\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n acc = (pred[edge_mask].argmax(-1) == pred_gt[edge_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[edge_mask].squeeze(-1), pred_gt[edge_mask])\n if data_type == Type.SCALAR:\n acc = ((pred[edge_mask].squeeze(-1) - pred_gt[edge_mask])**2).mean()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n acc = (pred[edge_mask] == pred_gt[edge_mask]).float().mean()\n assert acc is not None, f\"Please implement {name}\"\n return acc\n\n def get_metrics(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n batch = self.algorithm_module.prepare_batch(batch)\n 
accs_dict = defaultdict(list)\n\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec, {'hint': pred},\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['hint']\n\n for name in outputs:\n acc = self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name)[i+1],\n batch,\n graph_mask,\n node_mask,\n edge_mask)\n accs_dict[name].append(acc)\n\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec,\n output_logits,\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['output']\n for name in outputs:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n accs_dict[name].append(\n self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name),\n batch,\n graph_mask,\n node_mask,\n edge_mask))\n\n for k, v in accs_dict.items():\n accs_dict[k] = torch.stack(v).mean()\n\n return accs_dict\n\n def fwd_step(self, batch, batch_idx):\n if self.timeit:\n st = time.time()\n self.algorithm_module.epoch = self.current_epoch\n all_hint_logits, output_logits, masks = self.algorithm_module.process(batch)\n if self.timeit:\n print(f'forward step: {time.time()-st}')\n input()\n return all_hint_logits, output_logits, masks\n\n def training_step(self, batch, batch_idx):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'train/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs)\n total_loss = sum(losses_dict.values()) / len(losses_dict)\n self.log('train/loss/average_loss', total_loss, prog_bar=False, on_step=True, on_epoch=True, batch_size=batch.num_graphs)\n accs_dict = {}\n if self.current_epoch % self.test_train_every_n_epoch == 0:\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'train/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n # if sum(losses_dict.values()) > 1e5:\n # breakpoint()\n return {'loss': total_loss, 'losses_dict': losses_dict, 'accuracies': accs_dict}\n\n def valtest_step(self, batch, batch_idx, mode):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'{mode}/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n if torch.isnan(sum(losses_dict.values())).any():\n breakpoint()\n self.log(f'{mode}/loss/average_loss', sum(losses_dict.values()) / len(losses_dict), batch_size=batch.num_graphs, add_dataloader_idx=False)\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'{mode}/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n return {'losses': losses_dict, 'accuracies': accs_dict}\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % 
self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.fwd_step(batch, batch_idx)\n\n def load_dataset(self, split, suffix=''):\n split = split+suffix\n nn = CONFIGS[self.algorithm][split]['num_nodes']\n self.dataset_kwargs['split'] = split\n if (split, nn) not in self._datasets:\n self._datasets[(split, nn)] = self.dataset_class(\n self.dataset_root,\n nn,\n CONFIGS[self.algorithm][split]['num_samples'],\n algorithm=self.algorithm,\n **self.dataset_kwargs)\n self.dataset = self._datasets[(split, nn)]\n print(f'Loading {self.dataset=} (num nodes: {nn}) with kwargs')\n pprint(self.dataset_kwargs)\n print()\n\n def get_a_loader(self, split, suffix=''):\n self.load_dataset(split, suffix='')\n self.algorithm_module.dataset_spec = self.dataset.spec\n dl = DataLoader(self.dataset,\n batch_size=get_hyperparameters()['batch_size'],\n shuffle=True if split == 'train' else False,\n drop_last=False,\n follow_batch=['edge_index'],\n num_workers=1,\n persistent_workers=True)\n return dl\n\n def train_dataloader(self):\n return self.get_a_loader('train')\n\n def val_dataloader_alt(self):\n return [self.get_a_loader('val'), self.get_a_loader('test')]\n\n def val_dataloader(self):\n return self.get_a_loader('val')\n\n def test_dataloader(self, suffix=''):\n return self.get_a_loader('test'+suffix)\n\n def configure_optimizers(self):\n lr = self.learning_rate\n wd = self.weight_decay\n optimizer = optim.Adam(self.parameters(),\n weight_decay=wd,\n lr=lr)\n return optimizer" }, { "identifier": "get_number_of_nodes", "path": "utils_execution.py", "snippet": "def get_number_of_nodes(algorithm, split):\n nns = CONFIGS[algorithm][split]['num_nodes']\n if isinstance(nns, int):\n nns = [nns]\n return nns" }, { "identifier": "get_hyperparameters", "path": "hyperparameters.py", "snippet": "def get_hyperparameters():\n return {\n 'dim_latent': 128,\n 'num_bits': 8,\n 'weight_decay': 0,\n 'lr': 0.0003,\n 'nee_warmup_steps': 4000,\n 'dim_nodes_mst_prim': 1,\n 'dim_target_mst_prim': 1,\n 'device': 'cuda',\n 'batch_size': 64,\n 'bias': True,\n 'seed': 47, # for dataset generation\n 'calculate_termination_statistics': False,\n }" }, { "identifier": "CONFIGS", "path": "datasets/_configs.py", "snippet": "CONFIGS = defaultdict(lambda: _DEFAULT_CONFIG)" } ]
from collections import defaultdict
from pprint import pprint
from torch_geometric.loader import DataLoader
from pytorch_lightning.trainer.supporters import CombinedLoader
from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner
from utils_execution import get_number_of_nodes
from hyperparameters import get_hyperparameters
from datasets._configs import CONFIGS
import torch
import torch_geometric
import torch_geometric.utils as tg_utils
import torch_scatter
import networkx as nx
13,314
class LitVKCReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = AlgorithmReasoner( self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_VKC_metrics(self, batch, output_logits): selected_dense = torch_geometric.utils.to_dense_batch(output_logits['output']['selected'], batch=batch.batch)[0] selected_dense_topk = torch.sort(torch.topk(selected_dense.squeeze(-1), self.dataset.k, dim=-1).indices).values selected_topk = (selected_dense_topk+batch.ptr[:-1].unsqueeze(-1)).view(-1) selected_topk_gt = batch.selected.nonzero().squeeze(-1) selected_batch = batch.batch[selected_topk] acc_selected_topk = torch_scatter.scatter_mean((selected_topk == selected_topk_gt).float(), selected_batch).mean() G = tg_utils.to_networkx(batch, to_undirected=True, edge_attrs=['edge_attr']) mspl = nx.multi_source_dijkstra_path_length(G, sources=selected_topk.tolist(), weight='edge_attr') mspl = torch.tensor([mspl[i] for i in range(batch.num_nodes)]).to(selected_dense) farthest = torch_scatter.scatter_max(mspl, batch.batch)[0] assert (farthest + torch.finfo(torch.float32).eps >= batch.farthest).all() return { 'acc_topk': acc_selected_topk, 'farthest': farthest.mean(), 'farthest_gt': batch.farthest.mean(), 'farthest_relative_error': ((farthest-batch.farthest)/batch.farthest).mean(), } def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_VKC_metrics(batch, output_logits)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix
class LitVKCReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = AlgorithmReasoner( self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_VKC_metrics(self, batch, output_logits): selected_dense = torch_geometric.utils.to_dense_batch(output_logits['output']['selected'], batch=batch.batch)[0] selected_dense_topk = torch.sort(torch.topk(selected_dense.squeeze(-1), self.dataset.k, dim=-1).indices).values selected_topk = (selected_dense_topk+batch.ptr[:-1].unsqueeze(-1)).view(-1) selected_topk_gt = batch.selected.nonzero().squeeze(-1) selected_batch = batch.batch[selected_topk] acc_selected_topk = torch_scatter.scatter_mean((selected_topk == selected_topk_gt).float(), selected_batch).mean() G = tg_utils.to_networkx(batch, to_undirected=True, edge_attrs=['edge_attr']) mspl = nx.multi_source_dijkstra_path_length(G, sources=selected_topk.tolist(), weight='edge_attr') mspl = torch.tensor([mspl[i] for i in range(batch.num_nodes)]).to(selected_dense) farthest = torch_scatter.scatter_max(mspl, batch.batch)[0] assert (farthest + torch.finfo(torch.float32).eps >= batch.farthest).all() return { 'acc_topk': acc_selected_topk, 'farthest': farthest.mean(), 'farthest_gt': batch.farthest.mean(), 'farthest_relative_error': ((farthest-batch.farthest)/batch.farthest).mean(), } def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_VKC_metrics(batch, output_logits)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix
nns = get_number_of_nodes(self.algorithm, split)
2
2023-11-20 15:32:43+00:00
16k
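The record above centers on LitVKCReasoner, whose get_VKC_metrics scores predicted k-center nodes by running a multi-source Dijkstra from the selected nodes and reporting how far the farthest node is from its nearest center. Below is a minimal single-graph sketch of that metric in plain networkx; the toy graph, weights, and chosen centers are made up for illustration, and the record itself computes this over batched graphs with torch_scatter.

import networkx as nx

# toy weighted graph; edge weights stored under "edge_attr", as in the record's code
G = nx.Graph()
G.add_weighted_edges_from(
    [(0, 1, 1.0), (1, 2, 2.0), (2, 3, 1.0), (3, 4, 4.0)], weight="edge_attr"
)

selected = [0, 3]  # hypothetical k = 2 predicted centers
# shortest distance from every node to its nearest selected center
dist = nx.multi_source_dijkstra_path_length(G, sources=selected, weight="edge_attr")
farthest = max(dist.values())  # k-center objective: distance of the worst-covered node
print(farthest)  # 4.0 here: node 4 sits 4.0 away from its nearest center, node 3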
harisankar95/pathfinding3D
test/test_path.py
[ { "identifier": "DiagonalMovement", "path": "pathfinding3d/core/diagonal_movement.py", "snippet": "class DiagonalMovement:\n always = 1\n never = 2\n if_at_most_one_obstacle = 3\n only_when_no_obstacle = 4" }, { "identifier": "Grid", "path": "pathfinding3d/core/grid.py", "snippet": "class Grid:\n def __init__(\n self,\n width: int = 0,\n height: int = 0,\n depth: int = 0,\n matrix: MatrixType = None,\n grid_id: Optional[int] = None,\n inverse: bool = False,\n ):\n \"\"\"\n A grid represents the map (as 3d-list of nodes).\n\n Parameters\n ----------\n width : int, optional\n The width of the grid.\n height : int, optional\n The height of the grid.\n depth : int, optional\n The depth of the grid.\n matrix : MatrixType\n A 3D array of values (numbers or objects specifying weight)\n that determine how nodes are connected and if they are walkable.\n If no matrix is given, all nodes will be walkable.\n inverse : bool, optional\n If true, all values in the matrix that are not 0 will be considered\n walkable. Otherwise all values that are 0 will be considered walkable.\n \"\"\"\n self.width, self.height, self.depth = self._validate_dimensions(width, height, depth, matrix)\n self.nodes = (\n build_nodes(self.width, self.height, self.depth, matrix, inverse, grid_id)\n if self.is_valid_grid()\n else [[[]]]\n )\n\n def _validate_dimensions(self, width: int, height: int, depth: int, matrix: MatrixType) -> tuple:\n if matrix is not None:\n if not (\n isinstance(matrix, (list, np.ndarray))\n and len(matrix) > 0\n and len(matrix[0]) > 0\n and len(matrix[0][0]) > 0\n ):\n raise ValueError(\"Provided matrix is not a 3D structure or is empty.\")\n return len(matrix), len(matrix[0]), len(matrix[0][0])\n return width, height, depth\n\n def is_valid_grid(self) -> bool:\n return self.width > 0 and self.height > 0 and self.depth > 0\n\n def node(self, x: int, y: int, z: int) -> Optional[GridNode]:\n \"\"\"\n Get node at position\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n GridNode\n node at position\n \"\"\"\n return self.nodes[x][y][z] if self.inside(x, y, z) else None\n\n def inside(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if field position is inside map\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map\n \"\"\"\n return 0 <= x < self.width and 0 <= y < self.height and 0 <= z < self.depth\n\n def walkable(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if the tile is inside grid and if it is set as walkable\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map and walkable\n \"\"\"\n return self.inside(x, y, z) and self.nodes[x][y][z].walkable\n\n def calc_cost(self, node_a: GridNode, node_b: GridNode, weighted: bool = False) -> float:\n \"\"\"\n Get the distance between current node and the neighbor (cost)\n\n Parameters\n ----------\n node_a : GridNode\n current node\n node_b : GridNode\n neighbor node\n weighted : bool, optional\n True, if weighted algorithm is used, by default False\n\n Returns\n -------\n float\n distance between current node and the neighbor (cost)\n \"\"\"\n # Check if we have a straight, diagonal in plane or diagonal in space\n dx = node_b.x - node_a.x\n dy = node_b.y - node_a.y\n dz = node_b.z - node_a.z\n\n ng = math.sqrt(dx * dx + dy * dy + dz * dz)\n\n # weight for 
weighted algorithms\n if weighted:\n ng *= node_b.weight\n\n return ng\n\n def neighbors(\n self,\n node: GridNode,\n diagonal_movement: int = DiagonalMovement.never,\n ) -> List[GridNode]:\n \"\"\"\n Get all neighbors of one node\n\n Parameters\n ----------\n node : GridNode\n node to get neighbors from\n diagonal_movement : int, optional\n if diagonal movement is allowed\n (see enum in diagonal_movement), by default DiagonalMovement.never\n\n Returns\n -------\n list\n list of neighbor nodes\n \"\"\"\n x, y, z = node.x, node.y, node.z\n\n neighbors = []\n # current plane\n cs0 = cd0 = cs1 = cd1 = cs2 = cd2 = cs3 = cd3 = False\n # upper plane\n us0 = ud0 = us1 = ud1 = us2 = ud2 = us3 = ud3 = ut = False # ut = upper top\n # lower plane\n ls0 = ld0 = ls1 = ld1 = ls2 = ld2 = ls3 = ld3 = lb = False # lb = lower bottom\n\n # -y\n if self.walkable(x, y - 1, z):\n neighbors.append(self.nodes[x][y - 1][z])\n cs0 = True\n\n # +x\n if self.walkable(x + 1, y, z):\n neighbors.append(self.nodes[x + 1][y][z])\n cs1 = True\n\n # +y\n if self.walkable(x, y + 1, z):\n neighbors.append(self.nodes[x][y + 1][z])\n cs2 = True\n\n # -x\n if self.walkable(x - 1, y, z):\n neighbors.append(self.nodes[x - 1][y][z])\n cs3 = True\n\n # +z\n if self.walkable(x, y, z + 1):\n neighbors.append(self.nodes[x][y][z + 1])\n ut = True\n\n # -z\n if self.walkable(x, y, z - 1):\n neighbors.append(self.nodes[x][y][z - 1])\n lb = True\n\n # check for connections to other grids\n if node.connections:\n neighbors.extend(node.connections)\n\n if diagonal_movement == DiagonalMovement.never:\n return neighbors\n\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n cd0 = cs0 and cs1\n cd1 = cs1 and cs2\n cd2 = cs2 and cs3\n cd3 = cs3 and cs0\n\n us0 = cs0 and ut\n us1 = cs1 and ut\n us2 = cs2 and ut\n us3 = cs3 and ut\n\n ls0 = cs0 and lb\n ls1 = cs1 and lb\n ls2 = cs2 and lb\n ls3 = cs3 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n cd0 = cs0 or cs1\n cd1 = cs1 or cs2\n cd2 = cs2 or cs3\n cd3 = cs3 or cs0\n\n us0 = cs0 or ut\n us1 = cs1 or ut\n us2 = cs2 or ut\n us3 = cs3 or ut\n\n ls0 = cs0 or lb\n ls1 = cs1 or lb\n ls2 = cs2 or lb\n ls3 = cs3 or lb\n\n elif diagonal_movement == DiagonalMovement.always:\n cd0 = cd1 = cd2 = cd3 = True\n us0 = us1 = us2 = us3 = True\n ls0 = ls1 = ls2 = ls3 = True\n\n # +x -y\n if cd0 and self.walkable(x + 1, y - 1, z):\n neighbors.append(self.nodes[x + 1][y - 1][z])\n else:\n cd0 = False\n\n # +x +y\n if cd1 and self.walkable(x + 1, y + 1, z):\n neighbors.append(self.nodes[x + 1][y + 1][z])\n else:\n cd1 = False\n\n # -x +y\n if cd2 and self.walkable(x - 1, y + 1, z):\n neighbors.append(self.nodes[x - 1][y + 1][z])\n else:\n cd2 = False\n\n # -x -y\n if cd3 and self.walkable(x - 1, y - 1, z):\n neighbors.append(self.nodes[x - 1][y - 1][z])\n else:\n cd3 = False\n\n # -y +z\n if us0 and self.walkable(x, y - 1, z + 1):\n neighbors.append(self.nodes[x][y - 1][z + 1])\n else:\n us0 = False\n\n # +x +z\n if us1 and self.walkable(x + 1, y, z + 1):\n neighbors.append(self.nodes[x + 1][y][z + 1])\n else:\n us1 = False\n\n # +y +z\n if us2 and self.walkable(x, y + 1, z + 1):\n neighbors.append(self.nodes[x][y + 1][z + 1])\n else:\n us2 = False\n\n # -x +z\n if us3 and self.walkable(x - 1, y, z + 1):\n neighbors.append(self.nodes[x - 1][y][z + 1])\n else:\n us3 = False\n\n # -y -z\n if ls0 and self.walkable(x, y - 1, z - 1):\n neighbors.append(self.nodes[x][y - 1][z - 1])\n else:\n ls0 = False\n\n # +x -z\n if ls1 and self.walkable(x + 1, y, z - 1):\n 
neighbors.append(self.nodes[x + 1][y][z - 1])\n else:\n ls1 = False\n\n # +y -z\n if ls2 and self.walkable(x, y + 1, z - 1):\n neighbors.append(self.nodes[x][y + 1][z - 1])\n else:\n ls2 = False\n\n # -x -z\n if ls3 and self.walkable(x - 1, y, z - 1):\n neighbors.append(self.nodes[x - 1][y][z - 1])\n else:\n ls3 = False\n\n # remaining daigonal neighbors\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n ud0 = cs0 and cd0 and cs1 and us0 and us1 and ut\n ud1 = cs1 and cd1 and cs2 and us1 and us2 and ut\n ud2 = cs2 and cd2 and cs3 and us2 and us3 and ut\n ud3 = cs3 and cd3 and cs0 and us3 and us0 and ut\n\n ld0 = cs0 and cd0 and cs1 and ls0 and ls1 and lb\n ld1 = cs1 and cd1 and cs2 and ls1 and ls2 and lb\n ld2 = cs2 and cd2 and cs3 and ls2 and ls3 and lb\n ld3 = cs3 and cd3 and cs0 and ls3 and ls0 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n ud0 = sum([cs0, cd0, cs1, us0, us1, ut]) >= 5\n ud1 = sum([cs1, cd1, cs2, us1, us2, ut]) >= 5\n ud2 = sum([cs2, cd2, cs3, us2, us3, ut]) >= 5\n ud3 = sum([cs3, cd3, cs0, us3, us0, ut]) >= 5\n\n ld0 = sum([cs0, cd0, cs1, ls0, ls1, lb]) >= 5\n ld1 = sum([cs1, cd1, cs2, ls1, ls2, lb]) >= 5\n ld2 = sum([cs2, cd2, cs3, ls2, ls3, lb]) >= 5\n ld3 = sum([cs3, cd3, cs0, ls3, ls0, lb]) >= 5\n\n elif diagonal_movement == DiagonalMovement.always:\n ud0 = ud1 = ud2 = ud3 = True\n ld0 = ld1 = ld2 = ld3 = True\n\n # +x -y +z\n if ud0 and self.walkable(x + 1, y - 1, z + 1):\n neighbors.append(self.nodes[x + 1][y - 1][z + 1])\n\n # +x +y +z\n if ud1 and self.walkable(x + 1, y + 1, z + 1):\n neighbors.append(self.nodes[x + 1][y + 1][z + 1])\n\n # -x +y +z\n if ud2 and self.walkable(x - 1, y + 1, z + 1):\n neighbors.append(self.nodes[x - 1][y + 1][z + 1])\n\n # -x -y +z\n if ud3 and self.walkable(x - 1, y - 1, z + 1):\n neighbors.append(self.nodes[x - 1][y - 1][z + 1])\n\n # +x -y -z\n if ld0 and self.walkable(x + 1, y - 1, z - 1):\n neighbors.append(self.nodes[x + 1][y - 1][z - 1])\n\n # +x +y -z\n if ld1 and self.walkable(x + 1, y + 1, z - 1):\n neighbors.append(self.nodes[x + 1][y + 1][z - 1])\n\n # -x +y -z\n if ld2 and self.walkable(x - 1, y + 1, z - 1):\n neighbors.append(self.nodes[x - 1][y + 1][z - 1])\n\n # -x -y -z\n if ld3 and self.walkable(x - 1, y - 1, z - 1):\n neighbors.append(self.nodes[x - 1][y - 1][z - 1])\n\n return neighbors\n\n def cleanup(self):\n \"\"\"\n Cleanup grid\n \"\"\"\n for x_nodes in self.nodes:\n for y_nodes in x_nodes:\n for z_node in y_nodes:\n z_node.cleanup()" }, { "identifier": "GridNode", "path": "pathfinding3d/core/node.py", "snippet": "class GridNode(Node):\n \"\"\"\n basic node, saves X, Y and Z coordinates on some grid and determine if\n it is walkable.\n \"\"\"\n\n # Coordinates\n x: int = 0\n y: int = 0\n z: int = 0\n\n # Wether this node can be walked through.\n walkable: bool = True\n\n # used for weighted algorithms\n weight: float = 1.0\n\n # grid_id is used if we have more than one grid,\n # normally we just count our grids by number\n # but you can also use a string here.\n # Set it to None if you only have one grid.\n grid_id: Optional[int] = None\n\n connections: Optional[List] = None\n\n identifier: Optional[Tuple] = None\n\n def __post_init__(self):\n super().__init__()\n # for heap\n self.identifier: Tuple = (\n (self.x, self.y, self.z) if self.grid_id is None else (self.x, self.y, self.z, self.grid_id)\n )\n\n def __iter__(self):\n yield self.x\n yield self.y\n yield self.z\n if self.grid_id is not None:\n yield self.grid_id\n\n def connect(self, other_node: 
\"GridNode\"):\n if not self.connections:\n self.connections = [other_node]\n else:\n self.connections.append(other_node)" }, { "identifier": "AStarFinder", "path": "pathfinding3d/finder/a_star.py", "snippet": "class AStarFinder(Finder):\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using A* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n if not heuristic:\n if diagonal_movement == DiagonalMovement.never:\n self.heuristic = manhattan\n else:\n # When diagonal movement is allowed the manhattan heuristic is\n # not admissible it should be octile instead\n self.heuristic = octile\n\n def check_neighbors(\n self,\n start: GridNode,\n end: GridNode,\n grid: Grid,\n open_list: List,\n open_value: int = 1,\n backtrace_by=None,\n ) -> Optional[List[GridNode]]:\n \"\"\"\n Find next path segment based on given node\n (or return path if we found the end)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n open_list : List\n stores nodes that will be processed next\n\n Returns\n -------\n Optional[List[GridNode]]\n path\n \"\"\"\n\n # pop node with minimum 'f' value\n node = open_list.pop_node()\n node.closed = True\n\n # if reached the end position, construct the path and return it\n # (ignored for bi-directional a*, there we look for a neighbor that is\n # part of the oncoming path)\n if not backtrace_by and node == end:\n return backtrace(end)\n\n # get neighbors of the current node\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed:\n # already visited last minimum f value\n continue\n if backtrace_by and neighbor.opened == backtrace_by:\n # found the oncoming path\n if backtrace_by == BY_END:\n return bi_backtrace(node, neighbor)\n\n return bi_backtrace(neighbor, node)\n\n # check if the neighbor has not been inspected yet, or\n # can be reached with smaller cost from the current node\n self.process_node(grid, neighbor, node, end, open_list, open_value)\n\n # the end has not been reached (yet) keep the find_path loop running\n return None\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the A* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n\n start.g = 0\n start.f = 0\n return super().find_path(start, end, grid)" }, { "identifier": "BestFirst", "path": "pathfinding3d/finder/best_first.py", "snippet": "class BestFirst(AStarFinder):\n \"\"\"\n Similar 
to the default A* algorithm from a_star.\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using BestFirst algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n self.weighted = False\n\n def apply_heuristic(self, node_a: GridNode, node_b: GridNode, heuristic: Optional[Callable] = None) -> float:\n \"\"\"\n Helper function to apply heuristic\n\n Parameters\n ----------\n node_a : GridNode\n first node\n node_b : GridNode\n second node\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n\n Returns\n -------\n float\n heuristic value\n \"\"\"\n return super().apply_heuristic(node_a, node_b, heuristic) * 1000000" }, { "identifier": "BiAStarFinder", "path": "pathfinding3d/finder/bi_a_star.py", "snippet": "class BiAStarFinder(AStarFinder):\n \"\"\"\n Similar to the default A* algorithm from a_star.\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Bi-A* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. 
amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the A* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n (can be a list of grids)\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n start_open_list = SimpleHeap(start, grid)\n start.g = 0\n start.f = 0\n start.opened = BY_START\n\n end_open_list = SimpleHeap(end, grid)\n end.g = 0\n end.f = 0\n end.opened = BY_END\n\n while len(start_open_list) > 0 and len(end_open_list) > 0:\n self.runs += 1\n self.keep_running()\n path = self.check_neighbors(\n start,\n end,\n grid,\n start_open_list,\n open_value=BY_START,\n backtrace_by=BY_END,\n )\n if path:\n return path, self.runs\n\n self.runs += 1\n self.keep_running()\n path = self.check_neighbors(\n end,\n start,\n grid,\n end_open_list,\n open_value=BY_END,\n backtrace_by=BY_START,\n )\n if path:\n return path, self.runs\n\n # failed to find path\n return [], self.runs" }, { "identifier": "BreadthFirstFinder", "path": "pathfinding3d/finder/breadth_first.py", "snippet": "class BreadthFirstFinder(Finder):\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Breadth First algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. 
amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n weighted=False,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n if not diagonal_movement:\n self.diagonalMovement = DiagonalMovement.never\n\n def check_neighbors(\n self,\n start: GridNode,\n end: GridNode,\n grid: Grid,\n open_list: List,\n ) -> List[GridNode]:\n \"\"\"\n Find next path segment based on given node\n (or return path if we found the end)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n open_list : List\n stores nodes that will be processed next\n\n Returns\n -------\n List[GridNode]\n path\n \"\"\"\n node = open_list.pop_node()\n node.closed = True\n\n if node == end:\n return backtrace(end)\n\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed or neighbor.opened:\n continue\n\n open_list.push_node(neighbor)\n neighbor.opened = True\n neighbor.parent = node" }, { "identifier": "DijkstraFinder", "path": "pathfinding3d/finder/dijkstra.py", "snippet": "class DijkstraFinder(AStarFinder):\n def __init__(\n self,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Dijkstra algorithm\n\n Parameters\n ----------\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=null,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n def apply_heuristic(self, node_a: Node, node_b: Node, heuristic: Optional[Callable] = None) -> float:\n \"\"\"\n Helper function to apply heuristic\n\n Parameters\n ----------\n node_a : Node\n first node\n node_b : Node\n second node\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n\n Returns\n -------\n float\n 0.0\n \"\"\"\n return 0.0" }, { "identifier": "ExecutionRunsException", "path": "pathfinding3d/finder/finder.py", "snippet": "class ExecutionRunsException(Exception):\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "ExecutionTimeException", "path": "pathfinding3d/finder/finder.py", "snippet": "class ExecutionTimeException(Exception):\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "IDAStarFinder", "path": "pathfinding3d/finder/ida_star.py", "snippet": "class IDAStarFinder(Finder):\n \"\"\"\n Iterative Deeping A Star (IDA*) path-finder.\n\n Recursion based on:\n http://www.apl.jhu.edu/~hall/AI-Programming/IDA-Star.html\n\n Path retracing based on:\n V. Nageshwara Rao, Vipin Kumar and K. 
Ramesh\n \"A Parallel Implementation of Iterative-Deeping-A*\", January 1987.\n ftp://ftp.cs.utexas.edu/.snapshot/hourly.1/pub/AI-Lab/tech-reports/\n UT-AI-TR-87-46.pdf\n\n based on the JavaScript implementation by Gerard Meier\n (www.gerardmeier.com)\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n track_recursion: bool = True,\n ):\n \"\"\"\n Find shortest path using IDA* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n track_recursion : bool\n if we should track recursion\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n weighted=False,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n self.track_recursion = track_recursion\n if not heuristic:\n if diagonal_movement == DiagonalMovement.never:\n self.heuristic = manhattan\n else:\n # When diagonal movement is allowed the manhattan heuristic is\n # not admissible it should be octile instead\n self.heuristic = octile\n\n self.nodes_visited: int\n\n def search(\n self,\n node: GridNode,\n g: float,\n cutoff: float,\n path: List[GridNode],\n depth: int,\n end: GridNode,\n grid: Grid,\n ) -> Union[float, GridNode]:\n \"\"\"\n Recursive IDA* search implementation\n\n Parameters\n ----------\n node : GridNode\n current node\n g : float\n cost from start to current node\n cutoff : float\n cutoff cost\n path : List[GridNode]\n path\n depth : int\n current depth\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Union[float, GridNode]\n cutoff cost or end node\n \"\"\"\n self.runs += 1\n self.keep_running()\n\n self.nodes_visited += 1\n\n f = g + self.apply_heuristic(node, end) * self.weight\n\n # We've searched too deep for this iteration.\n if f > cutoff:\n return f\n\n if node == end:\n if len(path) < depth:\n path += [None] * (depth - len(path) + 1)\n path[depth] = node\n return node\n\n neighbors = self.find_neighbors(grid, node)\n\n # Sort the neighbors, gives nicer paths. But, this deviates\n # from the original algorithm - so I left it out\n # TODO: make this an optional parameter\n # def sort_neighbors(a, b):\n # return self.apply_heuristic(a, end) - \\\n # self.apply_heuristic(b, end)\n # sorted(neighbors, sort_neighbors)\n min_t = float(\"inf\")\n for neighbor in neighbors:\n if self.track_recursion:\n # Retain a copy for visualisation. 
Due to recursion, this\n # node may be part of other paths too.\n neighbor.retain_count += 1\n neighbor.tested = True\n\n t = self.search(\n neighbor,\n g + grid.calc_cost(node, neighbor),\n cutoff,\n path,\n depth + 1,\n end,\n grid,\n )\n\n if isinstance(t, GridNode):\n if len(path) < depth:\n path += [None] * (depth - len(path) + 1)\n path[depth] = node\n return t\n\n # Decrement count, then determine whether it's actually closed.\n if self.track_recursion:\n neighbor.retain_count -= 1\n if neighbor.retain_count == 0:\n neighbor.tested = False\n\n if t < min_t:\n min_t = t\n\n return min_t\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the IDA* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n self.nodes_visited = 0 # for statistics\n\n # initial search depth, given the typical heuristic contraints,\n # there should be no cheaper route possible.\n cutoff = self.apply_heuristic(start, end)\n\n while True:\n path = []\n\n # search till cut-off depth:\n t = self.search(start, 0, cutoff, path, 0, end, grid)\n\n if isinstance(t, bool) and not t:\n # only when an error occured we return \"False\"\n break\n\n # If t is a node, it's also the end node. Route is now\n # populated with a valid path to the end node.\n if isinstance(t, GridNode):\n return (\n [(node.x, node.y, node.z, node.grid_id) for node in path],\n self.runs,\n )\n\n # Try again, this time with a deeper cut-off. The t score\n # is the closest we got to the end node.\n cutoff = t\n\n return [], self.runs" }, { "identifier": "MinimumSpanningTree", "path": "pathfinding3d/finder/msp.py", "snippet": "class MinimumSpanningTree(Finder):\n \"\"\"\n Minimum Spanning Tree implementation by Brad Beattie\n (see https://github.com/brean/python-pathfinding/issues/18)\n\n The wikipedia page has a nice description about MSP:\n https://en.wikipedia.org/wiki/Minimum_spanning_tree\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.heuristic = heuristic.null\n\n def tree(self, grid: Grid, start: GridNode) -> List:\n \"\"\"\n Returns a list of nodes that are part of the minimum spanning tree\n of the grid.\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n start : GridNode\n start node\n\n Returns\n -------\n List\n \"\"\"\n\n return list(self.itertree(grid, start))\n\n def itertree(self, grid: Grid, start: GridNode):\n \"\"\"\n Returns a generator that yields nodes that are part of the minimum\n spanning tree of the grid.\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n start : GridNode\n start node\n \"\"\"\n # Finder.process_node requires an end node, which we don't have.\n # The following value tricks the call to Finder.apply_heuristic.\n # Though maybe we want to generate a limited spanning tree that\n # trends in a certain direction? 
In which case we'd want a more\n # nuanced solution.\n end = namedtuple(\"FakeNode\", [\"x\", \"y\", \"z\"])(-1, -1, -1)\n\n start.opened = True\n\n open_list = SimpleHeap(start, grid)\n\n while len(open_list) > 0:\n self.runs += 1\n self.keep_running()\n\n node = open_list.pop_node()\n node.closed = True\n yield node\n\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if not neighbor.closed:\n self.process_node(grid, neighbor, node, end, open_list, open_value=True)\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the Minimum Spanning\n Tree algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n for node in self.itertree(grid, start):\n if node == end:\n path = deque()\n step = node\n while step.parent:\n path.appendleft(step)\n step = step.parent\n path.appendleft(step)\n return path, self.runs\n\n return [], self.runs" } ]
import numpy as np import pytest from pathfinding3d.core.diagonal_movement import DiagonalMovement from pathfinding3d.core.grid import Grid from pathfinding3d.core.node import GridNode from pathfinding3d.finder.a_star import AStarFinder from pathfinding3d.finder.best_first import BestFirst from pathfinding3d.finder.bi_a_star import BiAStarFinder from pathfinding3d.finder.breadth_first import BreadthFirstFinder from pathfinding3d.finder.dijkstra import DijkstraFinder from pathfinding3d.finder.finder import ExecutionRunsException, ExecutionTimeException from pathfinding3d.finder.ida_star import IDAStarFinder from pathfinding3d.finder.msp import MinimumSpanningTree
11,366
weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 5 def test_max_runs(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT, max_runs=3) with pytest.raises(ExecutionRunsException): path, runs = finder.find_path(start, end, grid) print(f"{find.__name__} finishes after {runs} runs without exception") print(f"path: {path}") msg = f"{finder.__class__.__name__} needed too many iterations" assert finder.runs <= 3, msg def test_time(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=-0.1)
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 5 def test_max_runs(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT, max_runs=3) with pytest.raises(ExecutionRunsException): path, runs = finder.find_path(start, end, grid) print(f"{find.__name__} finishes after {runs} runs without exception") print(f"path: {path}") msg = f"{finder.__class__.__name__} needed too many iterations" assert finder.runs <= 3, msg def test_time(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=-0.1)
with pytest.raises(ExecutionTimeException):
9
2023-11-21 10:14:12+00:00
16k
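The test/test_path.py record above exercises the pathfinding3d API end to end (Grid construction, node lookup, finder.find_path). The following is a minimal usage sketch mirroring that flow; the 3x3x3 all-walkable matrix is a made-up toy input, whereas the tests themselves use the 5x5x5 SIMPLE_MATRIX defined in the record.

import numpy as np

from pathfinding3d.core.diagonal_movement import DiagonalMovement
from pathfinding3d.core.grid import Grid
from pathfinding3d.finder.a_star import AStarFinder

matrix = np.ones((3, 3, 3), dtype=int)  # every cell walkable
grid = Grid(matrix=matrix)
start = grid.node(0, 0, 0)
end = grid.node(2, 2, 2)

finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
path, runs = finder.find_path(start, end, grid)
# path entries may be GridNode objects or plain tuples depending on the finder,
# so only the length is reported here (see the tests above for per-node unpacking)
print(f"path length: {len(path)}, runs: {runs}")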
yuukawahiroshi/ddb-tools
extract_wav.py
[ { "identifier": "DDIModel", "path": "utils/ddi_utils.py", "snippet": "class DDIModel:\n def __init__(self, ddi_bytes: bytes) -> None:\n self.ddi_bytes = ddi_bytes\n self.ddi_data = None\n self.phdc_data = {}\n self.tdb_data = {}\n self.sta_data = {}\n self.art_data = {}\n self.vqm_data = {}\n self.offset_map = {}\n\n def read(self, temp_path: Optional[str] = None, cat_only: bool = False):\n if temp_path or cat_only:\n import yaml\n\n if cat_only:\n with open(os.path.join(temp_path, 'sta.yml'), mode='r',\n encoding='utf-8') as sta_f:\n self.sta_data = yaml.load(sta_f)\n with open(os.path.join(temp_path, 'art.yml'), mode='r',\n encoding='utf-8') as art_f:\n self.art_data = yaml.load(art_f)\n vqm_data = None\n if os.path.isfile(os.path.join(temp_path, 'vqm.yml')):\n with open(os.path.join(temp_path, 'vqm.yml'), mode='r',\n encoding='utf-8') as vqm_f:\n self.vqm_data = yaml.load(vqm_f)\n else:\n self.ddi_data = io.BytesIO(self.ddi_bytes)\n # DBSe\n # Tonio.ddi has no DBSe block\n \n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 0\n # assert ddi_data.read(4).decode() == 'DBSe'\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 1\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 3\n\n # PHDC\n phdc_offset = self.ddi_bytes.find(b'PHDC')\n if phdc_offset >= 0:\n self.ddi_data.seek(phdc_offset)\n self.phdc_data = self.read_phdc()\n\n self.offset_map['phdc'] = [phdc_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'phdc.yml'), mode='w',\n encoding='utf-8') as phdc_f:\n phdc_str = yaml.dump(self.phdc_data, default_flow_style=False,\n sort_keys=False)\n phdc_f.write(phdc_str)\n\n # TDB\n tdb_offset = self.ddi_bytes.find(b'\\xFF'*8+b'TDB ')\n if tdb_offset >= 0:\n self.ddi_data.seek(tdb_offset)\n self.tdb_data = self.read_tdb()\n self.offset_map['tdb'] = [tdb_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'tdb.yml'), mode='w',\n encoding='utf-8') as tdb_f:\n tdb_str = yaml.dump(self.tdb_data, default_flow_style=False,\n sort_keys=False)\n tdb_f.write(tdb_str)\n\n # DBV\n dbv_offset = self.ddi_bytes.find(b'\\x00'*8+b'DBV ')\n self.ddi_data.seek(dbv_offset)\n self.read_dbv()\n self.offset_map['dbv'] = [dbv_offset, self.ddi_data.tell()]\n\n # STA\n sta_offset = self.ddi_bytes.find(b'\\x00'*8+b'STA ')\n sta_offset = reverse_search(self.ddi_bytes, b'ARR ', sta_offset) - 8\n self.ddi_data.seek(sta_offset)\n self.sta_data = self.read_sta()\n self.offset_map['sta'] = [sta_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'sta.yml'), mode='w',\n encoding='utf-8') as sta_f:\n sta_str = yaml.dump(self.sta_data, default_flow_style=False,\n sort_keys=False)\n sta_f.write(sta_str)\n\n # ART\n art_offset = self.ddi_bytes.find(b'\\x00'*8+b'ART ')\n art_offset = reverse_search(self.ddi_bytes, b'ARR ', art_offset) - 8\n self.ddi_data.seek(art_offset)\n self.art_data = self.read_art()\n self.offset_map['art'] = [art_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'art.yml'), mode='w',\n encoding='utf-8') as art_f:\n art_str = yaml.dump(self.art_data, default_flow_style=False,\n sort_keys=False)\n art_f.write(art_str)\n\n # VQM\n vqm_offset = self.ddi_bytes.find(b'\\xFF'*8+b'VQM ')\n self.vqm_data = None\n if vqm_offset != -1:\n self.ddi_data.seek(vqm_offset)\n self.vqm_data = self.read_vqm()\n self.offset_map['vqm'] = [vqm_offset, self.ddi_data.tell()]\n\n if 
temp_path:\n with open(os.path.join(temp_path, 'vqm.yml'), mode='w',\n encoding='utf-8') as vqm_f:\n vqm_str = yaml.dump(self.vqm_data, default_flow_style=False,\n sort_keys=False)\n vqm_f.write(vqm_str)\n \n \n # DDI convert\n self.ddi_data_dict: dict[str, dict[str, list[artp_type]]] = {\n 'sta': {},\n 'art': {},\n }\n\n if self.vqm_data is not None:\n self.ddi_data_dict = {\n 'vqm': {},\n 'sta': {},\n 'art': {},\n }\n vqm_dict = []\n for idx, vqmp in self.vqm_data.items():\n vqm_dict.append({'snd': vqmp['snd'], 'epr': vqmp['epr'], 'pitch': vqmp['pitch1']})\n self.ddi_data_dict['vqm'] = vqm_dict\n\n sta_dict: dict[str, list[artp_type]] = {}\n for stau in self.sta_data.values():\n stau_dict: list[artp_type] = []\n for idx, stap in stau['stap'].items():\n stau_dict.append({'snd': stap['snd'], 'epr': stap['epr'], 'pitch': stap['pitch1']})\n sta_dict[stau['phoneme']] = stau_dict\n self.ddi_data_dict['sta'] = {key: sta_dict[key]\n for key in sorted(sta_dict.keys())}\n\n art_dict: dict[str, list[artp_type]] = {}\n for art in self.art_data.values():\n if 'artu' in art.keys():\n for artu in art['artu'].values():\n key = art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n if 'art' in art.keys():\n for sub_art in art['art'].values():\n sub_art: art_type\n if 'artu' in sub_art.keys():\n for artu in sub_art['artu'].values():\n key = art['phoneme']+' '+sub_art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n self.ddi_data_dict['art'] = {key: art_dict[key]\n for key in sorted(art_dict.keys())}\n\n\n def save(self, dst_path: Optional[str] = None):\n import yaml\n\n with open(os.path.join(dst_path, 'ddi.yml'), mode='w', encoding='utf-8') as ddi_f:\n ddi_str = yaml.dump(self.ddi_data_dict, default_flow_style=False,\n sort_keys=False)\n ddi_f.write(ddi_str)\n\n\n def read_phdc(self):\n phdc_data: dict[str, dict[int, list[str]]\n | dict[str, dict[int, str]]\n | dict[str, list[str]]\n | str]\n phdc_data = {}\n # PHDC\n phoneme_data: dict[str, list[str]] = {\"voiced\": [], \"unvoiced\": []}\n assert self.ddi_data.read(4).decode() == 'PHDC'\n phdc_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 4\n phoneme_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phoneme_num):\n bytes_str = self.ddi_data.read(0x1F)\n assert bytes_str[-1] in [0, 1]\n real_data = bytes_str[:-1].decode().strip('\\x00')\n\n phoneme_type = \"voiced\" if bytes_str[-1] == 0 else \"unvoiced\"\n\n phoneme_data[phoneme_type].append(real_data)\n phdc_data['phoneme'] = phoneme_data\n\n # PHG2\n phg2_data: dict[str, dict[int, str]] = {}\n assert self.ddi_data.read(4).decode() == 'PHG2'\n phg2_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phg2_epr_guide_num):\n phg2_key = read_str(self.ddi_data)\n phg2_data[phg2_key] = {}\n temp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(temp_num):\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_data[phg2_key][idx] = read_str(self.ddi_data)\n assert int.from_bytes(self.ddi_data.read(4), 
byteorder='little') == 0\n phdc_data['phg2'] = phg2_data\n\n # epr_guide\n epr_guide_data: dict[str, list[str]] = {}\n epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_guide_size = phdc_size-phg2_size-0x10-0x1F*phoneme_num-4\n epr_guide_bytes = self.ddi_data.read(epr_guide_size)\n offset = 0\n for i in range(epr_guide_num):\n key = epr_guide_bytes[offset:offset+0x20].decode().strip('\\x00')\n assert int.from_bytes(epr_guide_bytes[offset+0x20:offset+0x24],\n byteorder='little') == 4\n epr_guide_data[key] = []\n offset += 0x24\n while(offset < len(epr_guide_bytes) and epr_guide_bytes[offset] == 0):\n if epr_guide_bytes[offset+7] == 0x40:\n value = epr_guide_bytes[offset:offset + 7]\n start_idx = 0\n for i in range(7):\n if value[i] != 0:\n start_idx = i\n break\n # TODO: Need to check carefully. \"b'XXX'\" and we only take XXX\n value = bytes_to_str(value[start_idx:])\n epr_guide_data[key].append(value)\n else:\n assert int.from_bytes(epr_guide_bytes[offset:offset + 8],\n byteorder='little') == 0\n epr_guide_data[key].append('')\n offset += 8\n assert offset == len(epr_guide_bytes)\n phdc_data['epr_guide'] = epr_guide_data\n\n # hash string\n # phdc_data['hash'] = self.ddi_data.read(0x20).decode()\n # assert int.from_bytes(self.ddi_data.read(0xE0), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n\n return phdc_data\n\n\n def read_tdb(self) -> dict[int, str]:\n tdb_data: dict[int, str] = {}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TDB '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi (B9 13 10 00)\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n tmm_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n str_list = ['pitch', 'dynamics', 'opening']\n for i in range(tmm_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TMM '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # print(i, idx)\n str_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert str_num == 3\n for j in range(str_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 0\n assert read_str(self.ddi_data) == str_list[j]\n phoneme = read_str(self.ddi_data)\n tdb_data[idx] = phoneme\n assert read_str(self.ddi_data) == 'timbre'\n return tdb_data\n\n\n def read_dbv(self) -> None:\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'DBV '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # 4 for AVANNA, 5 for others?\n\n\n def read_sta(self) -> dict[int, artu_type]:\n sta_data: dict[int, artu_type] = {}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n\n assert self.ddi_data.read(4).decode() == 'STA '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') 
# == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n stau_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(stau_num):\n stau_data: artu_type = {'phoneme': '', 'stap': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n stau_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(8) == b'\\xFF'*8\n stap_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(stap_num):\n stap_data: artp_type = {'snd': '', 'snd_length': '', 'epr': []}\n _pos = self.ddi_data.tell()\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n stap_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n stap_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n \n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0x3D\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'SND'\n stap_data['snd_length'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'EpR'\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n stap_data['epr'] = epr_list\n stap_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n stap_data['snd'] = f'{snd_offset_pos:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n\n _pos = self.ddi_data.tell()\n stap_data['unknown4'] = bytes_to_str(self.ddi_data.read(0x10))\n stap_idx = read_str(self.ddi_data)\n assert stap_idx not in stau_data['stap'].keys()\n stau_data['stap'][stap_idx] = stap_data\n stau_data['stap'] = {k: stau_data['stap'][k]\n for k in 
sorted(stau_data['stap'].keys())}\n stau_data['phoneme'] = read_str(self.ddi_data)\n sta_data[stau_idx] = stau_data\n sta_data = {k: sta_data[k] for k in sorted(sta_data.keys())}\n assert read_str(self.ddi_data) == 'normal'\n assert read_str(self.ddi_data) == 'stationary'\n return sta_data\n\n\n def read_art(self) -> dict[int, art_type]:\n total_art_data: dict[int, art_type] = {}\n int.from_bytes(self.ddi_data.read(8), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') != 0\n while(True):\n start = self.ddi_data.read(8)\n if not (start in [b'\\x00'*8, b'\\xFF'*8]):\n offset = self.ddi_data.tell()-8\n self.ddi_data.seek(offset)\n assert read_str(self.ddi_data) == 'articulation'\n break\n assert self.ddi_data.read(4).decode() == 'ART '\n art_idx, art_data = self.read_art_block()\n total_art_data[art_idx] = art_data\n total_art_data = {key: total_art_data[key]\n for key in sorted(total_art_data.keys())}\n return total_art_data\n\n\n def read_art_block(self) -> tuple[int, art_type]:\n art_data: art_type = {'phoneme': '', 'artu': {}, 'art': {}}\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n art_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artu_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n i = -1\n for i in range(artu_num):\n artu_data: artu_type = {'phoneme': '', 'artp': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n block_type = self.ddi_data.read(4).decode()\n if block_type == 'ART ':\n sub_art_idx, sub_art_data = self.read_art_block()\n art_data['art'][sub_art_idx] = sub_art_data\n continue\n else:\n assert block_type == 'ARTu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n artu_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # TODO: why to be 1?\n assert int.from_bytes(self.ddi_data.read(8),\n byteorder='little') in [0, 1]\n self.ddi_data.read(4)\n assert self.ddi_data.read(4) == b'\\xFF'*4\n artp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(artp_num):\n artp_data: artp_type = {'snd': '', 'snd_unknown': '', 'epr': []}\n dev_artp_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['dev_artp'] = f'{dev_artp_offset:0>8x}'\n assert self.ddi_data.read(4).decode() == 'ARTp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n artp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n artp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # print(f'art {i:4d} {j:4d} {unknown}')\n # if env['unknown'] is None:\n # env['unknown'] = unknown\n # else:\n # assert env['unknown'] == unknown\n assert int.from_bytes(self.ddi_data.read(4), 
byteorder='little') == 2\n # TODO: This doesn't seem to be an index actually\n artp_idx = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt1 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt1'] = f'{snd_len_empt1:08x}'\n assert read_str(self.ddi_data) == 'SND'\n snd_len_sta = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artp_data['snd_len_sta'] = f'{snd_len_sta:08x}'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt2 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt2'] = f'{snd_len_empt2:08x}'\n assert read_str(self.ddi_data) == 'EpR'\n loc = self.ddi_data.tell()\n try:\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n except AssertionError:\n self.ddi_data.seek(loc)\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n \n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['snd'] = f'{snd_offset_pos:08x}={snd_offset-0x12:016x}_{snd_identifier:08x}'\n\n snd_offset2_pos = self.ddi_data.tell()\n snd_offset2 = int.from_bytes(self.ddi_data.read(8), byteorder='little') # == snd_offset+0x800 Exception: Tonio.ddi (0)\n artp_data['snd_start'] = f'{snd_offset2_pos:08x}={snd_offset2-0x12:016x}_{snd_identifier:08x}'\n\n ddi_bytes: bytes = self.ddi_bytes[self.ddi_data.tell():self.ddi_data.tell() + 1024]\n align_length = ddi_bytes.find(b'default')-4\n align_bytes = self.ddi_data.read(align_length)\n frame_align = []\n if align_length > 4:\n align_group_num = int.from_bytes(align_bytes[0:4], byteorder='little')\n # In V3 format, each group has int32 * 4 bytes\n align_bytes = align_bytes[4:]\n align_io = io.BytesIO(align_bytes)\n for _ in range(0, align_group_num):\n frame_align_group = {\n \"start\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"start2\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end2\": int.from_bytes(align_io.read(4), byteorder='little'),\n }\n frame_align.append(frame_align_group)\n else: # V2 format\n frame_align_group = []\n for i in range(0, len(align_bytes), 4):\n frame_align_group.append(int.from_bytes(align_bytes[i:i+4], byteorder='little'))\n frame_align.append(frame_align_group)\n artp_data['frame_align'] = frame_align\n \n assert 
read_str(self.ddi_data) == 'default'\n\n assert artp_idx not in artu_data['artp'].keys()\n artu_data['artp'][artp_idx] = artp_data\n artu_data['artp'] = {k: artu_data['artp'][k]\n for k in sorted(artu_data['artp'].keys())}\n artu_data['phoneme'] = read_str(self.ddi_data)\n art_data['artu'][artu_idx] = artu_data\n art_data['artu'] = {k: art_data['artu'][k]\n for k in sorted(art_data['artu'].keys())}\n art_data['art'] = {k: art_data['art'][k]\n for k in sorted(art_data['art'].keys())}\n art_data['phoneme'] = read_str(self.ddi_data)\n if len(art_data['art'].keys()) == 0:\n del art_data['art']\n if len(art_data['artu'].keys()) == 0:\n del art_data['artu']\n return art_idx, art_data\n\n\n def read_vqm(self) -> dict[int, artp_type]:\n vqm_data: dict[int, artp_type] = {}\n\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQM '\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert self.ddi_data.read(8) == b'\\xFF'*8\n\n assert self.ddi_data.read(4).decode() == 'VQMu'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n\n vqmp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == vqmp_num\n for i in range(vqmp_num):\n vqmp_data = {'snd': '', 'epr': []}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQMp'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n vqmp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n vqmp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # TODO: that may not be same as env['unknown']\n vqmp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4) == b'\\xFF'*4\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n ddi_epr_offset = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n epr_list.append(f'{ddi_epr_offset:0>8x}={epr_offset:0>8x}')\n vqmp_data['epr'] = epr_list\n vqmp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n ddi_snd_offset = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n vqmp_data['snd'] = f'{ddi_snd_offset:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n assert self.ddi_data.read(0x10) == b'\\xFF'*0x10\n vqmp_idx = int(read_str(self.ddi_data))\n vqm_data[vqmp_idx] = vqmp_data\n assert read_str(self.ddi_data) == 'GROWL'\n assert read_str(self.ddi_data) == 'vqm'\n return vqm_data" }, { 
"identifier": "bytes_to_str", "path": "utils/ddi_utils.py", "snippet": "def bytes_to_str(data: bytes, add_spaces: bool = True) -> str:\n if add_spaces:\n return ' '.join([f'{piece:02x}' for piece in list(data)])\n else:\n return ''.join([f'{piece:02x}' for piece in list(data)])" }, { "identifier": "stream_reverse_search", "path": "utils/ddi_utils.py", "snippet": "def stream_reverse_search(data: io.BufferedReader, search: bytes, offset: int, limit: int = -1) -> int:\n if limit == -1:\n limit = 1024 * 1024 * 10\n offset -= len(search)\n for i in range(offset, 0, -1):\n data.seek(i)\n if data.read(len(search)) == search:\n return i\n if offset - i > limit:\n break\n\n return -1" } ]
import argparse
import math
import os
import re
import time
import wave
from typing import Sequence, TypedDict
from utils.ddi_utils import DDIModel, bytes_to_str, stream_reverse_search
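Both bytes_to_str and stream_reverse_search imported here are quoted in full in the context list above. As a purely illustrative sketch (it is not part of this dataset row), the following standalone Python inlines those two snippets verbatim and exercises them on an in-memory buffer; the toy byte string and the io.BytesIO stand-in for a .ddb stream are assumptions made only for the demo:

import io

# inlined verbatim from the "bytes_to_str" snippet above
def bytes_to_str(data: bytes, add_spaces: bool = True) -> str:
    if add_spaces:
        return ' '.join([f'{piece:02x}' for piece in list(data)])
    else:
        return ''.join([f'{piece:02x}' for piece in list(data)])

# inlined verbatim from the "stream_reverse_search" snippet above
# (a BytesIO object satisfies the seek/read calls even though the annotation names BufferedReader)
def stream_reverse_search(data: io.BufferedReader, search: bytes, offset: int, limit: int = -1) -> int:
    if limit == -1:
        limit = 1024 * 1024 * 10
    offset -= len(search)
    for i in range(offset, 0, -1):
        data.seek(i)
        if data.read(len(search)) == search:
            return i
        if offset - i > limit:
            break
    return -1

buf = io.BytesIO(b'\x00\x01SND \xde\xad\xbe\xefSND \x00')       # toy stand-in for a .ddb stream
print(bytes_to_str(b'\xde\xad\xbe\xef'))                         # -> "de ad be ef"
print(stream_reverse_search(buf, b'SND ', len(buf.getvalue())))  # -> 10, the last 'SND ' before the offset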
11,581
start_time = offset_time + \ frm2sec(frame_align[i]["start"], sample_rate) end_time = offset_time + frm2sec(frame_align[i]["end"], sample_rate) if i == 0: boundaries.append(start_time) boundaries.append(end_time) seg_list.append([phoneme, start_time, end_time]) art_seg_info: ArticulationSegmentInfo = { "boundaries": boundaries, "phonemes": [] } if len(phonemes) == 4: # VCV art_seg_info["phonemes"] = [phonemes[0], phonemes[1], phonemes[3]] else: art_seg_info["phonemes"] = phonemes trans_content = generate_transcription(seg_list) seg_content = generate_seg(seg_list, duration_time) art_seg_content = generate_articulation_seg( art_seg_info, total_bytes, unvoiced_consonant_list) return trans_content, seg_content, art_seg_content def generate_transcription(seg_info: list[list]) -> str: content = [] phoneme_list = [] for i in range(0, len(seg_info)): phoneme_list.append(seg_info[i][0]) content.append(" ".join(phoneme_list)) trans_group = [item[0] for item in seg_info] content.append("[" + " ".join(trans_group) + "]") return "\n".join(content) def generate_seg( phoneme_list: list[list], wav_length: float ) -> str: content = [ "nPhonemes %d" % (len(phoneme_list) + 2,), # Add 2 Sil "articulationsAreStationaries = 0", "phoneme BeginTime EndTime", "===================================================", ] content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", 0, phoneme_list[0][1])) begin_time: float = 0 end_time: float = 0 for i in range(0, len(phoneme_list)): phoneme_info = phoneme_list[i] phoneme_name = phoneme_info[0] begin_time = phoneme_info[1] end_time = phoneme_info[2] content.append("%s\t\t%.6f\t\t%.6f" % (phoneme_name, begin_time, end_time)) content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", end_time, wav_length)) return "\n".join(content) + "\n" def generate_articulation_seg( art_seg_info: ArticulationSegmentInfo, wav_samples: int, unvoiced_consonant_list: list[str] ) -> str: content = [ "nphone art segmentation", "{", '\tphns: ["' + ('", "'.join(art_seg_info["phonemes"])) + '"];', "\tcut offset: 0;", "\tcut length: %d;" % int(math.floor(wav_samples / 2)), ] boundaries_str = [ ("%.9f" % item) for item in art_seg_info["boundaries"] ] content.append("\tboundaries: [" + ", ".join(boundaries_str) + "];") content.append("\trevised: false;") voiced_str = [] is_triphoneme = len(art_seg_info["phonemes"]) == 3 for i in range(0, len(art_seg_info["phonemes"])): phoneme = art_seg_info["phonemes"][i] is_unvoiced = phoneme in unvoiced_consonant_list or phoneme in [ "Sil", "Asp", "?", ] voiced_str.append(str(not is_unvoiced).lower()) if is_triphoneme and i == 1: # Triphoneme needs 2 flags for center phoneme voiced_str.append(str(not is_unvoiced).lower()) content.append("\tvoiced: [" + ", ".join(voiced_str) + "];") content.append("};") content.append("") return "\n".join(content) def main(): ddi_path, ddb_path, dst_path, filename_style, gen_lab, gen_seg = parse_args() snd_pos_list: list[int] = [] # Read DDI file print("Reading DDI...") with open(ddi_path, "rb") as f: ddi_bytes = f.read()
#!/usr/bin/env python3 from __future__ import annotations start_encode = 'SND '.encode() wav_params = (1, 2, 44100, 0, 'NONE', 'NONE') window_size = 512 class ArticulationSegmentInfo(TypedDict): phonemes: list[str, str] boundaries: list[list[str, float, float]] def escape_xsampa(xsampa: str) -> str: """Escapes xsampa to file name.""" xsampa = xsampa.replace("Sil", "sil") # Sil is a special case xsampa = ( xsampa.replace("\\", "-") .replace("/", "~") .replace("?", "!") .replace(":", ";") .replace("<", "(") .replace(">", ")") ) return xsampa def unescape_xsampa(xsampa: str) -> str: """Unescapes xsampa from file name.""" xsampa = ( xsampa.replace("-", "\\") .replace("~", "/") .replace("!", "?") .replace(";", ":") .replace("(", "<") .replace(")", ">") ) return xsampa def parse_args(args: Sequence[str] = None): # : list[str] # initialize parser parser = argparse.ArgumentParser() parser.add_argument('--src_path', required=True, help='source ddi file path') parser.add_argument('--dst_path', help='destination extract path, ' 'default to be "./[name]/snd"') parser.add_argument('--gen_lab', action='store_true', help='generate lab file') parser.add_argument('--gen_seg', action='store_true', help='generate trans, seg, as files') parser.add_argument('--filename_style', type=str, choices=['flat', 'devkit'], default=None, help="output filename style, default to be 'devkit', or default to be 'flat' if gen_lab is true.") # parse args args_result = parser.parse_args(args) ddi_path: str = os.path.normpath(args_result.src_path) ddb_path: str = re.sub(r'\.ddi$', '.ddb', ddi_path) dst_path: str = args_result.dst_path if dst_path is None: dst_path = os.path.dirname(ddi_path) + '/snd' dst_path: str = os.path.normpath(dst_path) # make dirs if not os.path.exists(dst_path): os.makedirs(dst_path) gen_lab: bool = args_result.gen_lab gen_seg: bool = args_result.gen_seg filename_style: str = args_result.filename_style if filename_style is None: if gen_lab or gen_seg: filename_style = "flat" else: filename_style = "devkit" return ddi_path, ddb_path, dst_path, filename_style, gen_lab, gen_seg def create_file_name(phonemes: list[str], name_style: str, offset: int, pitch: float, dst_path: str, file_type: str): offset_hex = f'{offset:0>8x}' escaped_phonemes = [escape_xsampa(p) for p in phonemes] phonemes_len = len(phonemes) if pitch >= 0: pit_str = f"pit+{pitch:.2f}" else: pit_str = f"pit{pitch:.2f}" filename = "" if name_style == "flat": phonemes_str = "-".join(escaped_phonemes) prefix = "" if phonemes_len == 0: filename = f"unknown_{offset_hex}.{file_type}" else: if phonemes_len == 1: if phonemes[0] == "growl": prefix = "growl" else: prefix = "sta" elif phonemes_len == 2: prefix = "art" elif phonemes_len == 3: prefix = "tri" file_type_prefix = "lab" if file_type == "lab" else "wav" filename = f"{file_type_prefix}/{prefix}_[{phonemes_str}]_{pit_str}_{offset_hex}.{file_type}" elif name_style == "devkit": phonemes_path = "/".join([item + "#" + bytes_to_str(item.encode('utf-8')) for item in escaped_phonemes]) root_path = "" if phonemes_len == 0: filename = f"unknown/{offset_hex}.{file_type}" else: if phonemes_len == 1: if phonemes[0] == "growl": root_path = "vqm/growl" else: root_path = "stationary" elif phonemes_len == 2: root_path = "articulation" elif phonemes_len == 3: root_path = "triphoneme" filename = f"{root_path}/{phonemes_path}/{pit_str}_{offset_hex}.{file_type}" folder = os.path.dirname(filename) if folder != "": os.makedirs(os.path.join(dst_path, folder), exist_ok=True) return filename def nsample2sec(nsample: int, 
sample_rate: int) -> float: return nsample / sample_rate / 2 def frm2sec(frm: int, sample_rate: int) -> float: return frm * window_size / sample_rate / 2 def generate_lab(phonemes: list[str], frame_align: list[dict], sample_rate: int, offset_bytes: int, total_bytes: int): offset_time = nsample2sec(offset_bytes, sample_rate) * 1e7 duration_time = nsample2sec(total_bytes, sample_rate) * 1e7 lab_lines = [] if len(phonemes) == 3: # VCV center_phoneme = re.sub("^\^", "", phonemes[1]) phonemes = [phonemes[0], center_phoneme, center_phoneme, phonemes[2]] lab_lines.append(f"0 {offset_time:.0f} sil") last_time = 0 for i, phoneme in enumerate(phonemes): frame = frame_align[i] start_time = offset_time + frm2sec(frame["start"], sample_rate) * 1e7 end_time = offset_time + frm2sec(frame["end"], sample_rate) * 1e7 lab_lines.append(f'{start_time:.0f} {end_time:.0f} {phoneme}') last_time = end_time lab_lines.append(f'{last_time:.0f} {duration_time:.0f} sil') return "\n".join(lab_lines) def generate_seg_files( phonemes: list[str], frame_align: list[dict], sample_rate: int, offset_bytes: int, total_bytes: int, unvoiced_consonant_list: list[str]): offset_time = nsample2sec(offset_bytes, sample_rate) duration_time = nsample2sec(total_bytes, sample_rate) if len(phonemes) == 3: # VCV center_phoneme = re.sub("^\^", "", phonemes[1]) phonemes = [phonemes[0], center_phoneme, center_phoneme, phonemes[2]] seg_list: list[list] = [] boundaries: list[float] = [] for i, phoneme in enumerate(phonemes): start_time = offset_time + \ frm2sec(frame_align[i]["start"], sample_rate) end_time = offset_time + frm2sec(frame_align[i]["end"], sample_rate) if i == 0: boundaries.append(start_time) boundaries.append(end_time) seg_list.append([phoneme, start_time, end_time]) art_seg_info: ArticulationSegmentInfo = { "boundaries": boundaries, "phonemes": [] } if len(phonemes) == 4: # VCV art_seg_info["phonemes"] = [phonemes[0], phonemes[1], phonemes[3]] else: art_seg_info["phonemes"] = phonemes trans_content = generate_transcription(seg_list) seg_content = generate_seg(seg_list, duration_time) art_seg_content = generate_articulation_seg( art_seg_info, total_bytes, unvoiced_consonant_list) return trans_content, seg_content, art_seg_content def generate_transcription(seg_info: list[list]) -> str: content = [] phoneme_list = [] for i in range(0, len(seg_info)): phoneme_list.append(seg_info[i][0]) content.append(" ".join(phoneme_list)) trans_group = [item[0] for item in seg_info] content.append("[" + " ".join(trans_group) + "]") return "\n".join(content) def generate_seg( phoneme_list: list[list], wav_length: float ) -> str: content = [ "nPhonemes %d" % (len(phoneme_list) + 2,), # Add 2 Sil "articulationsAreStationaries = 0", "phoneme BeginTime EndTime", "===================================================", ] content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", 0, phoneme_list[0][1])) begin_time: float = 0 end_time: float = 0 for i in range(0, len(phoneme_list)): phoneme_info = phoneme_list[i] phoneme_name = phoneme_info[0] begin_time = phoneme_info[1] end_time = phoneme_info[2] content.append("%s\t\t%.6f\t\t%.6f" % (phoneme_name, begin_time, end_time)) content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", end_time, wav_length)) return "\n".join(content) + "\n" def generate_articulation_seg( art_seg_info: ArticulationSegmentInfo, wav_samples: int, unvoiced_consonant_list: list[str] ) -> str: content = [ "nphone art segmentation", "{", '\tphns: ["' + ('", "'.join(art_seg_info["phonemes"])) + '"];', "\tcut offset: 0;", "\tcut length: %d;" % 
int(math.floor(wav_samples / 2)), ] boundaries_str = [ ("%.9f" % item) for item in art_seg_info["boundaries"] ] content.append("\tboundaries: [" + ", ".join(boundaries_str) + "];") content.append("\trevised: false;") voiced_str = [] is_triphoneme = len(art_seg_info["phonemes"]) == 3 for i in range(0, len(art_seg_info["phonemes"])): phoneme = art_seg_info["phonemes"][i] is_unvoiced = phoneme in unvoiced_consonant_list or phoneme in [ "Sil", "Asp", "?", ] voiced_str.append(str(not is_unvoiced).lower()) if is_triphoneme and i == 1: # Triphoneme needs 2 flags for center phoneme voiced_str.append(str(not is_unvoiced).lower()) content.append("\tvoiced: [" + ", ".join(voiced_str) + "];") content.append("};") content.append("") return "\n".join(content) def main(): ddi_path, ddb_path, dst_path, filename_style, gen_lab, gen_seg = parse_args() snd_pos_list: list[int] = [] # Read DDI file print("Reading DDI...") with open(ddi_path, "rb") as f: ddi_bytes = f.read()
ddi_model = DDIModel(ddi_bytes)
0
2023-11-20 11:37:46+00:00
16k
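The generate_lab() routine in the record above turns byte offsets and analysis-frame indices into lab timestamps through nsample2sec() and frm2sec(). Below is a minimal sketch of that conversion chain, not taken from the repository, using the 44.1 kHz sample rate, 2-byte sample width, and 512-sample window that appear as wav_params and window_size in the same file; the frame numbers and the 0x1000 byte offset are made up for the example:

SAMPLE_RATE = 44100    # from wav_params = (1, 2, 44100, 0, 'NONE', 'NONE')
WINDOW_SIZE = 512      # from window_size = 512

def nsample2sec(nbytes: int) -> float:
    # byte count -> seconds; the trailing "/ 2" matches the 2-byte (16-bit) sample width
    return nbytes / SAMPLE_RATE / 2

def frm2sec(frm: int) -> float:
    # analysis-frame index -> seconds
    return frm * WINDOW_SIZE / SAMPLE_RATE / 2

# one phoneme spanning frames 10..25 of a snippet that starts 0x1000 bytes into the stream
offset_time = nsample2sec(0x1000) * 1e7    # lab files count time in 100 ns units, hence * 1e7
start = offset_time + frm2sec(10) * 1e7
end = offset_time + frm2sec(25) * 1e7
print(f"{start:.0f} {end:.0f} a")          # one "<begin> <end> <phoneme>" lab line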
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n # print('************************encoder shape',x.shape)\n\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n 
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = 
x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', 
to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n\n\n if conditioning is not None:\n if isinstance(conditioning, dict):\n if isinstance(list(conditioning.values())[0],list):\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n else:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps 
is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "Attention_AR_counter", "path": "text_super_resolution/model/VisionLAN/utils.py", "snippet": "class Attention_AR_counter():\n def __init__(self, display_string, dict_file, case_sensitive):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n self.display_string = display_string\n self.case_sensitive = case_sensitive\n self.de = cha_encdec(dict_file, case_sensitive)\n\n def clear(self):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n \n def add_iter(self, output, out_length, label_length, labels):\n self.total_samples += label_length.size()[0]\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n for i in range(0, len(prdt_texts)):\n if not self.case_sensitive:\n prdt_texts[i] = prdt_texts[i].lower()\n labels[i] = labels[i].lower()\n all_words = []\n for w in labels[i].split('|') + prdt_texts[i].split('|'):\n if w not in all_words:\n all_words.append(w)\n l_words = [all_words.index(_) for _ in labels[i].split('|')]\n p_words = [all_words.index(_) for _ in prdt_texts[i].split('|')]\n self.distance_C += ed.eval(labels[i], prdt_texts[i])\n self.distance_W += ed.eval(l_words, p_words)\n self.total_C += len(labels[i])\n self.total_W += len(l_words)\n self.correct = self.correct + 1 if labels[i] == prdt_texts[i] else self.correct\n return prdt_texts, labels\n\n def show(self):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W))\n self.clear()\n def show_test(self,best_acc, change= False):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n if (self.correct / self.total_samples) > best_acc:\n best_acc = np.copy(self.correct / self.total_samples)\n change = True\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}, best_acc: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W, best_acc))\n\n self.clear()\n return best_acc, change\n \n def convert(self, output, out_length):\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n prdt_prob = prdt_prob.cpu().unsqueeze(0)\n MAX_LEN = 25\n length = prdt_prob.size(1)\n if length >= MAX_LEN:\n return prdt_prob[:, :MAX_LEN, :], prdt_prob\n pad = torch.zeros([prdt_prob.shape[0], MAX_LEN - length, prdt_prob.shape[2]])\n prdt_prob = torch.cat([prdt_prob, pad], dim=1)\n return prdt_texts, prdt_prob" }, { "identifier": "TPSSpatialTransformer", "path": "text_super_resolution/model/tps_spatial_transformer.py", "snippet": "class TPSSpatialTransformer(nn.Module):\n\n def __init__(self, output_image_size=None, num_control_points=None, margins=None):\n super(TPSSpatialTransformer, self).__init__()\n self.output_image_size = output_image_size\n self.num_control_points = num_control_points\n self.margins = margins\n\n self.target_height, self.target_width = output_image_size\n target_control_points = build_output_control_points(num_control_points, 
margins)\n N = num_control_points\n # N = N - 4\n\n # create padded kernel matrix\n forward_kernel = torch.zeros(N + 3, N + 3)\n target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)\n forward_kernel[:N, :N].copy_(target_control_partial_repr)\n forward_kernel[:N, -3].fill_(1)\n forward_kernel[-3, :N].fill_(1)\n forward_kernel[:N, -2:].copy_(target_control_points)\n forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))\n # compute inverse matrix\n inverse_kernel = torch.inverse(forward_kernel)\n\n # create target cordinate matrix\n HW = self.target_height * self.target_width\n target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))\n target_coordinate = torch.Tensor(target_coordinate) # HW x 2\n Y, X = target_coordinate.split(1, dim = 1)\n Y = Y / (self.target_height - 1)\n X = X / (self.target_width - 1)\n target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y)\n target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)\n target_coordinate_repr = torch.cat([\n target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate\n ], dim = 1)\n\n # register precomputed matrices\n self.register_buffer('inverse_kernel', inverse_kernel)\n self.register_buffer('padding_matrix', torch.zeros(3, 2))\n self.register_buffer('target_coordinate_repr', target_coordinate_repr)\n self.register_buffer('target_control_points', target_control_points)\n\n def forward(self, input, source_control_points):\n assert source_control_points.ndimension() == 3\n assert source_control_points.size(1) == self.num_control_points\n assert source_control_points.size(2) == 2\n batch_size = source_control_points.size(0)\n\n Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)\n mapping_matrix = torch.matmul(self.inverse_kernel, Y)\n source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)\n\n grid = source_coordinate.view(-1, self.target_height, self.target_width, 2)\n grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1].\n # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]\n grid = 2.0 * grid - 1.0\n output_maps = grid_sample(input, grid, canvas=None)\n return output_maps, source_coordinate" }, { "identifier": "STNHead", "path": "text_super_resolution/model/stn_head.py", "snippet": "class STNHead(nn.Module):\n def __init__(self, in_planes, num_ctrlpoints, activation='none', input_size=(16, 64)):\n super(STNHead, self).__init__()\n\n self.in_planes = in_planes\n self.num_ctrlpoints = num_ctrlpoints\n self.activation = activation\n self.stn_convnet = nn.Sequential(\n # conv3x3_block(in_planes, 32), # 32*128\n # nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(in_planes, 32), # 16*64\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(32, 64), # 8*32\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(64, 128), # 4*16\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(128, 256), # 2*8\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(256, 256), # 1*4,\n nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)),\n conv3x3_block(256, 256)) # 1*2\n\n flatten_width = int(input_size[1] / 32)\n # print(\"flw:\", input_size[1] / 32)\n self.stn_fc1 = nn.Sequential(\n nn.Linear(512, 512), #flatten_width*256\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True))\n self.stn_fc2 = nn.Linear(512, num_ctrlpoints*2)\n\n self.init_weights(self.stn_convnet)\n 
self.init_weights(self.stn_fc1)\n self.init_stn(self.stn_fc2)\n\n def init_weights(self, module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.001)\n m.bias.data.zero_()\n\n def init_stn(self, stn_fc2):\n margin = 0.01\n sampling_num_per_side = int(self.num_ctrlpoints / 2)\n ctrl_pts_x = np.linspace(margin, 1.-margin, sampling_num_per_side)\n ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin\n ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1-margin)\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)\n # print(ctrl_points.shape)\n if self.activation is 'none':\n pass\n elif self.activation == 'sigmoid':\n ctrl_points = -np.log(1. / ctrl_points - 1.)\n elif self.activation == 'relu':\n ctrl_points = F.relu(torch.Tensor(ctrl_points))\n stn_fc2.weight.data.zero_()\n stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)\n\n def forward(self, x):\n x = self.stn_convnet(x)\n batch_size, _, h, w = x.size()\n x = x.view(batch_size, -1)\n\n # print(\"x:\", x.shape)\n\n img_feat = self.stn_fc1(x)\n x = self.stn_fc2(0.1 * img_feat)\n if self.activation == 'sigmoid':\n x = torch.sigmoid(x)\n if self.activation == 'relu':\n x = F.relu(x)\n x = x.view(-1, self.num_ctrlpoints, 2)\n return img_feat, x" }, { "identifier": "VisionLAN", "path": "text_super_resolution/model/VisionLAN/VisionLAN.py", "snippet": "class VisionLAN(nn.Module):\n '''\n Architecture of VisionLAN\n input\n input: input image\n label_pos: character index\n output\n text_pre: word-level prediction from VRM\n test_rem: remaining string prediction from MLM\n text_mas: occluded character prediction from MLM\n '''\n def __init__(self, strides, input_shape):\n super(VisionLAN, self).__init__()\n self.backbone = resnet.resnet45(strides, compress_layer=False)\n self.input_shape = input_shape\n self.MLM_VRM = MLM_VRM()\n def forward(self, input, label_pos, training_stp, Train_in = True):\n # extract features\n features = self.backbone(input)\n # MLM + VRM\n if Train_in:\n text_pre, test_rem, text_mas, mask_map = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return text_pre, test_rem, text_mas, mask_map\n else:\n output, out_length = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return output, out_length" }, { "identifier": "SemanticLoss", "path": "text_super_resolution/loss/semantic_loss.py", "snippet": "class SemanticLoss(nn.Module):\n def __init__(self, margin=0.1):\n super(SemanticLoss, self).__init__()\n self.cos_sim = nn.CosineSimilarity(dim=-1, eps=1e-8)\n self.margin = margin\n\n self.lambda1 = 1.0\n self.lambda2 = 1.0\n\n self.kl_loss = torch.nn.KLDivLoss()\n\n def forward(self, pred_vec, gt_vec):\n # pred_vec: [N, C]\n # gt_vec: [N, C]\n # mean_sim = torch.mean(self.cos_sim(gt_vec, pred_vec))\n # sim_loss = 1 - mean_sim\n \n #noise = Variable(torch.rand(pred_vec.shape)) * 0.1 - 0.05\n\n #normed_pred_vec = pred_vec + noise.to(pred_vec.device)\n # print(\"pred_vec:\", pred_vec.shape)\n norm_vec = torch.abs(gt_vec - pred_vec)\n margin_loss = torch.mean(norm_vec) #\n\n # pr int(\"sem_loss:\", 
float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n ce_loss = self.kl_loss(torch.log(pred_vec + 1e-20), gt_vec + 1e-20)\n # print(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n\n return self.lambda1 * margin_loss + self.lambda2 * ce_loss# ce_loss #margin_loss # + ce_loss # + sim_loss #margin_loss +\n\n def cross_entropy(self, pred_vec, gt_vec, l=1e-5):\n cal = gt_vec * torch.log(pred_vec+l) + (1 - gt_vec) * torch.log(1 - pred_vec+l)\n #print(\"cal:\", cal)\n return -cal" }, { "identifier": "ssim_psnr", "path": "text_super_resolution/utils/ssim_psnr.py", "snippet": "def calculate_psnr(img1, img2):\ndef weighted_calculate_psnr(img1, img2, weighted_mask):\ndef gaussian(window_size, sigma):\ndef create_window(window_size, channel):\ndef create_rect_window(window_H, window_W, channel):\ndef _ssim_weighted(img1_, img2_, window, window_size, channel, weighted_mask, size_average=True):\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\ndef _tri_ssim(img1, img2, img3, window, window_size, channel, size_average=True):\ndef _ssim_rect(img1, img2, window, window_size, channel, size_average=True):\n def __init__(self, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, img3):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, weighted_mask):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\ndef ssim(img1, img2, window_size=11, size_average=True):\ndef ssim_weighted(img1, img2, weighted_mask, window_size=11, size_average=True):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n H, W = window_size\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\nclass Distorted_SSIM(torch.nn.Module):\nclass SSIM(torch.nn.Module):\nclass TRI_SSIM(torch.nn.Module):\nclass SSIM_WEIGHTED(torch.nn.Module):\nclass SSIM_TSR(torch.nn.Module):" } ]
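The context above quotes make_beta_schedule() in full; its "linear" branch, together with the cumulative alpha products that DDPM.register_schedule() later in this file registers as buffers, is small enough to reproduce in isolation. A minimal sketch, not taken from the repository:

import numpy as np
import torch

def linear_betas(n_timestep: int, linear_start: float = 1e-4, linear_end: float = 2e-2) -> np.ndarray:
    # the "linear" branch of make_beta_schedule(): evenly spaced in sqrt(beta), then squared
    betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
    return betas.numpy()

betas = linear_betas(1000)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas, axis=0)       # cumulative product used throughout register_schedule()
sqrt_alphas_cumprod = np.sqrt(alphas_cumprod)     # scales x_0 when sampling q(x_t | x_0)
sqrt_one_minus = np.sqrt(1.0 - alphas_cumprod)    # scales the injected Gaussian noise
print(betas[0], betas[-1])                        # approximately 1e-4 and 2e-2, as configured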
import datetime
import math
import cv2
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import pygame
from collections import OrderedDict
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from torchvision import transforms
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter
from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer
from text_super_resolution.model.stn_head import STNHead
from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN
from utils.render_standard_text import *
from text_super_resolution.loss.semantic_loss import SemanticLoss
from text_super_resolution.utils import ssim_psnr
from pygame import freetype
from utils.metrics import *
14,310
x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins))
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): # print('************************fuck',k) x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict 
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins))
self.stn_head = STNHead(
20
2023-11-20 06:34:21+00:00
16k
mjavadpur/mj_ONNX_SadTalker
inference_onnx.py
[ { "identifier": "AnimateFromCoeff", "path": "src/facerender/animate_onnx.py", "snippet": "class AnimateFromCoeff():\n\n def __init__(self, sadtalker_path, device):\n\n with open(sadtalker_path['facerender_yaml']) as f:\n config = yaml.safe_load(f)\n\n generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],\n **config['model_params']['common_params'])\n kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],\n **config['model_params']['common_params'])\n he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],\n **config['model_params']['common_params'])\n mapping = MappingNet(**config['model_params']['mapping_params'])\n\n generator.to(device)\n kp_extractor.to(device)\n he_estimator.to(device)\n mapping.to(device)\n for param in generator.parameters():\n param.requires_grad = False\n for param in kp_extractor.parameters():\n param.requires_grad = False \n for param in he_estimator.parameters():\n param.requires_grad = False\n for param in mapping.parameters():\n param.requires_grad = False\n\n if sadtalker_path is not None:\n if 'checkpoint' in sadtalker_path: # use safe tensor\n self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)\n else:\n self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)\n else:\n raise AttributeError(\"Checkpoint should be specified for video head pose estimator.\")\n\n if sadtalker_path['mappingnet_checkpoint'] is not None:\n self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)\n else:\n raise AttributeError(\"Checkpoint should be specified for video head pose estimator.\") \n\n self.kp_extractor = kp_extractor\n self.generator = generator\n self.he_estimator = he_estimator\n self.mapping = mapping\n\n self.kp_extractor.eval()\n self.generator.eval()\n self.he_estimator.eval()\n self.mapping.eval()\n \n self.device = device\n \n def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None, \n kp_detector=None, he_estimator=None, \n device=\"cpu\"):\n\n checkpoint = safetensors.torch.load_file(checkpoint_path)\n\n if generator is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'generator' in k:\n x_generator[k.replace('generator.', '')] = v\n generator.load_state_dict(x_generator)\n if kp_detector is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'kp_extractor' in k:\n x_generator[k.replace('kp_extractor.', '')] = v\n kp_detector.load_state_dict(x_generator)\n if he_estimator is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'he_estimator' in k:\n x_generator[k.replace('he_estimator.', '')] = v\n he_estimator.load_state_dict(x_generator)\n \n return None\n\n def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None, \n kp_detector=None, he_estimator=None, optimizer_generator=None, \n optimizer_discriminator=None, optimizer_kp_detector=None, \n optimizer_he_estimator=None, device=\"cpu\"):\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))\n if generator is not None:\n generator.load_state_dict(checkpoint['generator'])\n if kp_detector is not None:\n kp_detector.load_state_dict(checkpoint['kp_detector'])\n if he_estimator is not None:\n he_estimator.load_state_dict(checkpoint['he_estimator'])\n if discriminator is not None:\n try:\n 
discriminator.load_state_dict(checkpoint['discriminator'])\n except:\n print ('No discriminator in the state-dict. Dicriminator will be randomly initialized')\n if optimizer_generator is not None:\n optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])\n if optimizer_discriminator is not None:\n try:\n optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])\n except RuntimeError as e:\n print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')\n if optimizer_kp_detector is not None:\n optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])\n if optimizer_he_estimator is not None:\n optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])\n\n return checkpoint['epoch']\n \n def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,\n optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))\n if mapping is not None:\n mapping.load_state_dict(checkpoint['mapping'])\n if discriminator is not None:\n discriminator.load_state_dict(checkpoint['discriminator'])\n if optimizer_mapping is not None:\n optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])\n if optimizer_discriminator is not None:\n optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])\n\n return checkpoint['epoch']\n\n def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):\n\n source_image=x['source_image'].type(torch.FloatTensor)\n source_semantics=x['source_semantics'].type(torch.FloatTensor)\n target_semantics=x['target_semantics_list'].type(torch.FloatTensor) \n source_image=source_image.to(self.device)\n source_semantics=source_semantics.to(self.device)\n target_semantics=target_semantics.to(self.device)\n if 'yaw_c_seq' in x:\n yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)\n yaw_c_seq = x['yaw_c_seq'].to(self.device)\n else:\n yaw_c_seq = None\n if 'pitch_c_seq' in x:\n pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)\n pitch_c_seq = x['pitch_c_seq'].to(self.device)\n else:\n pitch_c_seq = None\n if 'roll_c_seq' in x:\n roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) \n roll_c_seq = x['roll_c_seq'].to(self.device)\n else:\n roll_c_seq = None\n\n frame_num = x['frame_num']\n\n predictions_video = make_animation(source_image, source_semantics, target_semantics,\n self.generator, self.kp_extractor, self.he_estimator, self.mapping, \n yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)\n\n predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])\n predictions_video = predictions_video[:frame_num]\n\n video = []\n for idx in range(predictions_video.shape[0]):\n image = predictions_video[idx]\n image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)\n video.append(image)\n result = img_as_ubyte(video)\n\n ### the generated video is 256x256, so we keep the aspect ratio, \n original_size = crop_info[0]\n if original_size:\n result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]\n \n video_name = x['video_name'] + '.mp4'\n path = os.path.join(video_save_dir, 'temp_'+video_name)\n \n imageio.mimsave(path, result, fps=float(25))\n\n av_path = os.path.join(video_save_dir, video_name)\n return_path = av_path \n \n audio_path = x['audio_path'] \n audio_name = 
os.path.splitext(os.path.split(audio_path)[-1])[0]\n new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')\n start_time = 0\n # cog will not keep the .mp3 filename\n sound = AudioSegment.from_file(audio_path)\n frames = frame_num \n end_time = start_time + frames*1/25*1000\n word1=sound.set_frame_rate(16000)\n word = word1[start_time:end_time]\n word.export(new_audio_path, format=\"wav\")\n\n save_video_with_watermark(path, new_audio_path, av_path, watermark= False)\n print(f'The generated video is named {video_save_dir}/{video_name}') \n\n if 'full' in preprocess.lower():\n # only add watermark to the full image.\n video_name_full = x['video_name'] + '_full.mp4'\n full_video_path = os.path.join(video_save_dir, video_name_full)\n return_path = full_video_path\n paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n print(f'The generated video is named {video_save_dir}/{video_name_full}') \n else:\n full_video_path = av_path \n\n #### paste back then enhancers\n if enhancer:\n video_name_enhancer = x['video_name'] + '_enhanced.mp4'\n enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)\n av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) \n return_path = av_path_enhancer\n\n try:\n enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n except:\n enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n \n save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)\n print(f'The generated video is named {video_save_dir}/{video_name_enhancer}')\n os.remove(enhanced_path)\n\n os.remove(path)\n os.remove(new_audio_path)\n\n return return_path\n \n def generate_deploy(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):\n # Create Talking Face\n # 1. Reading Data\n source_image=x['source_image'].type(torch.FloatTensor)\n source_semantics=x['source_semantics'].type(torch.FloatTensor)\n target_semantics=x['target_semantics_list'].type(torch.FloatTensor) \n source_image=source_image.to(self.device)\n source_semantics=source_semantics.to(self.device)\n target_semantics=target_semantics.to(self.device)\n # 2. برای محاسبه به دستگاه self.device انتقال دهید\n if 'yaw_c_seq' in x:\n yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)\n yaw_c_seq = x['yaw_c_seq'].to(self.device)\n else:\n yaw_c_seq = None\n if 'pitch_c_seq' in x:\n pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)\n pitch_c_seq = x['pitch_c_seq'].to(self.device)\n else:\n pitch_c_seq = None\n if 'roll_c_seq' in x:\n roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) \n roll_c_seq = x['roll_c_seq'].to(self.device)\n else:\n roll_c_seq = None\n\n frame_num = x['frame_num']\n # 3. پیش‌بینی‌های مدل مولد برای ویدیوهای Talking Face\n predictions_video = make_animation(source_image, source_semantics, target_semantics,\n self.generator, self.kp_extractor, self.he_estimator, self.mapping, \n yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)\n # 4. تنظیم شکل و برش\n predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])\n predictions_video = predictions_video[:frame_num]\n # 5. 
هر فریم ویدیو را پیمایش کنید و آن را به Numpy تبدیل کنید و در نتیجه ذخیره کنید.\n video = []\n for idx in range(predictions_video.shape[0]):\n image = predictions_video[idx]\n image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)\n video.append(image)\n result = img_as_ubyte(video)\n # 6. اندازه تصویر در نتیجه را متناسب با اطلاعات اندازه اصلی در crop_info تغییر دهید.\n original_size = crop_info[0]\n if original_size:\n result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]\n\n # 7. از کتابخانه imageio برای ذخیره نتیجه به عنوان یک فایل ویدیویی با نرخ فریم 25 استفاده کنید.\n video_name = x['video_name'] + '.mp4'\n path = os.path.join(video_save_dir, 'temp_'+video_name)\n \n imageio.mimsave(path, result, fps=float(25))\n\n av_path = os.path.join(video_save_dir, video_name)\n return_path = av_path \n \n # 8. مسیر صوتی را در پارامتر x وارد کنید و یک مسیر فایل صوتی جدید ایجاد کنید.\n audio_path = x['audio_path'] \n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')\n start_time = 0\n sound = AudioSegment.from_file(audio_path)\n frames = frame_num \n end_time = start_time + frames*1/25*1000\n word1=sound.set_frame_rate(16000)\n word = word1[start_time:end_time]\n word.export(new_audio_path, format=\"wav\")\n \n\n save_video_with_watermark(path, new_audio_path, av_path, watermark= False)\n print(f' ---- The first generated video is named {video_save_dir}/{video_name}') \n \n if 'full' in preprocess.lower():\n # only add watermark to the full image.\n video_name_full = x['video_name'] + '_full.mp4'\n full_video_path = os.path.join(video_save_dir, video_name_full)\n return_path = full_video_path\n paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n print(f' ---- The full generated video is named {video_save_dir}/{video_name_full}') \n else:\n full_video_path = av_path \n \n if enhancer:\n video_name_enhancer = x['video_name'] + '_enhanced.mp4'\n enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)\n av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) \n return_path = av_path_enhancer\n\n print(\" ---- video_name_enhancer: \" + video_name_enhancer + \"\\n ---- enhanced_path: \" + enhanced_path + \"\\n ---- av_path_enhancer: \" + av_path_enhancer + \"\\n ---- return_path: \" + return_path)\n\n try:\n enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n print(\" -- len of enhanced_images_gen_with_len -- \" + str(len(enhanced_images_gen_with_len)))\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n print(\"\\n -------- try execute enhanced_path ---\" + enhanced_path + \"\\n ---- path:\" + path+ \"\\n ---- full_video_path:\" + full_video_path)\n except:\n enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n print(\" -- len of enhanced_images_gen_with_len -- \" + str(len(enhanced_images_gen_with_len)))\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n print(\"\\n -------- except execute enhanced_path ---\" + enhanced_path+ \"\\n ---- path:\" + path+ \"\\n ---- full_video_path:\" + full_video_path)\n \n save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)\n print(f' ---- The enhance 
generated video is named {video_save_dir}/{video_name_enhancer}')\n # os.remove(enhanced_path)\n\n # حالت فول تصویر پس‌بازگشت\n # paste_pic(av_path_enhancer, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n # print(f'The final enhancer generated video is named {full_video_path}') \n # return_path = full_video_path\n \n # os.remove(path)\n # os.remove(new_audio_path)\n print(f' ---- Final return_path: {return_path}')\n\n return return_path" }, { "identifier": "get_data", "path": "src/generate_batch.py", "snippet": "def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False, idlemode=False, length_of_audio=False, use_blink=True):\n\n syncnet_mel_step_size = 16\n fps = 25\n\n pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]\n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n\n \n if idlemode:\n num_frames = int(length_of_audio * 25)\n indiv_mels = np.zeros((num_frames, 80, 16))\n else:\n wav = audio.load_wav(audio_path, 16000) \n wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)\n wav = crop_pad_audio(wav, wav_length)\n orig_mel = audio.melspectrogram(wav).T\n spec = orig_mel.copy() # nframes 80\n indiv_mels = []\n\n for i in tqdm(range(num_frames), 'mel:'):\n start_frame_num = i-2\n start_idx = int(80. * (start_frame_num / float(fps)))\n end_idx = start_idx + syncnet_mel_step_size\n seq = list(range(start_idx, end_idx))\n seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]\n m = spec[seq, :]\n indiv_mels.append(m.T)\n indiv_mels = np.asarray(indiv_mels) # T 80 16\n\n ratio = generate_blink_seq_randomly(num_frames) # T\n source_semantics_path = first_coeff_path\n source_semantics_dict = scio.loadmat(source_semantics_path)\n ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)\n\n if ref_eyeblink_coeff_path is not None:\n ratio[:num_frames] = 0\n refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)\n refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]\n refeyeblink_num_frames = refeyeblink_coeff.shape[0]\n if refeyeblink_num_frames<num_frames:\n div = num_frames//refeyeblink_num_frames\n re = num_frames%refeyeblink_num_frames\n refeyeblink_coeff_list = [refeyeblink_coeff for i in range(div)]\n refeyeblink_coeff_list.append(refeyeblink_coeff[:re, :64])\n refeyeblink_coeff = np.concatenate(refeyeblink_coeff_list, axis=0)\n print(refeyeblink_coeff.shape[0])\n\n ref_coeff[:, :64] = refeyeblink_coeff[:num_frames, :64] \n \n indiv_mels = torch.FloatTensor(indiv_mels).unsqueeze(1).unsqueeze(0) # bs T 1 80 16\n\n if use_blink:\n ratio = torch.FloatTensor(ratio).unsqueeze(0) # bs T\n else:\n ratio = torch.FloatTensor(ratio).unsqueeze(0).fill_(0.) 
\n # bs T\n ref_coeff = torch.FloatTensor(ref_coeff).unsqueeze(0) # bs 1 70\n\n indiv_mels = indiv_mels.to(device)\n ratio = ratio.to(device)\n ref_coeff = ref_coeff.to(device)\n\n return {'indiv_mels': indiv_mels, \n 'ref': ref_coeff, \n 'num_frames': num_frames, \n 'ratio_gt': ratio,\n 'audio_name': audio_name, 'pic_name': pic_name}" }, { "identifier": "get_facerender_data", "path": "src/generate_facerender_batch.py", "snippet": "def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, \n batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, \n expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):\n\n semantic_radius = 13\n video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]\n txt_path = os.path.splitext(coeff_path)[0]\n\n data={}\n\n img1 = Image.open(pic_path)\n source_image = np.array(img1)\n source_image = img_as_float32(source_image)\n source_image = transform.resize(source_image, (size, size, 3))\n source_image = source_image.transpose((2, 0, 1))\n source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)\n source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)\n data['source_image'] = source_image_ts\n \n source_semantics_dict = scio.loadmat(first_coeff_path)\n generated_dict = scio.loadmat(coeff_path)\n\n if 'full' not in preprocess.lower():\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n else:\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)\n source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)\n source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)\n data['source_semantics'] = source_semantics_ts\n\n # target \n generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale\n\n if 'full' in preprocess.lower():\n generated_3dmm = np.concatenate([generated_3dmm, np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)\n\n if still_mode:\n generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)\n\n with open(txt_path+'.txt', 'w') as f:\n for coeff in generated_3dmm:\n for i in coeff:\n f.write(str(i)[:7] + ' '+'\\t')\n f.write('\\n')\n\n target_semantics_list = [] \n frame_num = generated_3dmm.shape[0]\n data['frame_num'] = frame_num\n for frame_idx in range(frame_num):\n target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)\n target_semantics_list.append(target_semantics)\n\n remainder = frame_num%batch_size\n if remainder!=0:\n for _ in range(batch_size-remainder):\n target_semantics_list.append(target_semantics)\n\n target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1\n target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])\n data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)\n data['video_name'] = video_name\n data['audio_path'] = audio_path\n \n if input_yaw_list is not None:\n yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)\n data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)\n if input_pitch_list is not None:\n pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)\n data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)\n if input_roll_list is not None:\n 
roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) \n data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)\n \n return data" }, { "identifier": "init_path", "path": "src/utils/init_path.py", "snippet": "def init_path(checkpoint_dir, config_dir, size=512, old_version=False, preprocess='crop'):\n\n if old_version:\n #### load all the checkpoint of `pth`\n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n use_safetensor = False\n elif len(glob.glob(os.path.join(checkpoint_dir, '*.safetensors'))):\n print('using safetensor as default')\n sadtalker_paths = {\n \"checkpoint\":os.path.join(checkpoint_dir, 'SadTalker_V0.0.2_'+str(size)+'.safetensors'),\n }\n use_safetensor = True\n else:\n print(\"WARNING: The new version of the model will be updated by safetensor, you may need to download it mannully. We run the old version of the checkpoint this time!\")\n use_safetensor = False\n \n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n sadtalker_paths['dir_of_BFM_fitting'] = os.path.join(config_dir) # , 'BFM_Fitting'\n sadtalker_paths['audio2pose_yaml_path'] = os.path.join(config_dir, 'auido2pose.yaml')\n sadtalker_paths['audio2exp_yaml_path'] = os.path.join(config_dir, 'auido2exp.yaml')\n sadtalker_paths['use_safetensor'] = use_safetensor # os.path.join(config_dir, 'auido2exp.yaml')\n\n if 'full' in preprocess:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00109-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender_still.yaml')\n else:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00229-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender.yaml')\n\n return sadtalker_paths" }, { "identifier": "CropAndExtract", "path": "src/utils/preprocess.py", "snippet": "class CropAndExtract():\n def __init__(self, sadtalker_path, device):\n\n self.propress = Preprocesser(device)\n self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device)\n \n if sadtalker_path['use_safetensor']:\n checkpoint = safetensors.torch.load_file(sadtalker_path['checkpoint']) \n self.net_recon.load_state_dict(load_x_from_safetensor(checkpoint, 'face_3drecon'))\n else:\n checkpoint = torch.load(sadtalker_path['path_of_net_recon_model'], map_location=torch.device(device)) \n self.net_recon.load_state_dict(checkpoint['net_recon'])\n\n self.net_recon.eval()\n self.lm3d_std = load_lm3d(sadtalker_path['dir_of_BFM_fitting'])\n self.device = device\n \n def generate(self, input_path, save_dir, crop_or_resize='crop', source_image_flag=False, pic_size=256):\n\n pic_name = os.path.splitext(os.path.split(input_path)[-1])[0] \n\n landmarks_path = os.path.join(save_dir, 
pic_name+'_landmarks.txt') \n coeff_path = os.path.join(save_dir, pic_name+'.mat') \n png_path = os.path.join(save_dir, pic_name+'.png') \n\n #load input\n if not os.path.isfile(input_path):\n raise ValueError('input_path must be a valid path to video/image file')\n elif input_path.split('.')[-1] in ['jpg', 'png', 'jpeg']:\n # loader for first frame\n full_frames = [cv2.imread(input_path)]\n fps = 25\n else:\n # loader for videos\n video_stream = cv2.VideoCapture(input_path)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n full_frames = [] \n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break \n full_frames.append(frame) \n if source_image_flag:\n break\n\n x_full_frames= [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in full_frames] \n\n #### crop images as the \n if 'crop' in crop_or_resize.lower(): # default crop\n x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)\n clx, cly, crx, cry = crop\n lx, ly, rx, ry = quad\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)\n elif 'full' in crop_or_resize.lower():\n x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)\n clx, cly, crx, cry = crop\n lx, ly, rx, ry = quad\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)\n else: # resize mode\n oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1] \n crop_info = ((ox2 - ox1, oy2 - oy1), None, None)\n\n frames_pil = [Image.fromarray(cv2.resize(frame,(pic_size, pic_size))) for frame in x_full_frames]\n if len(frames_pil) == 0:\n print('No face is detected in the input file')\n return None, None\n\n # save crop info\n for frame in frames_pil:\n cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))\n\n # 2. get the landmark according to the detected face. 
\n if not os.path.isfile(landmarks_path): \n lm = self.propress.predictor.extract_keypoint(frames_pil, landmarks_path)\n else:\n print(' Using saved landmarks.')\n lm = np.loadtxt(landmarks_path).astype(np.float32)\n lm = lm.reshape([len(x_full_frames), -1, 2])\n\n if not os.path.isfile(coeff_path):\n # load 3dmm paramter generator from Deep3DFaceRecon_pytorch \n video_coeffs, full_coeffs = [], []\n for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'):\n frame = frames_pil[idx]\n W,H = frame.size\n lm1 = lm[idx].reshape([-1, 2])\n \n if np.mean(lm1) == -1:\n lm1 = (self.lm3d_std[:, :2]+1)/2.\n lm1 = np.concatenate(\n [lm1[:, :1]*W, lm1[:, 1:2]*H], 1\n )\n else:\n lm1[:, -1] = H - 1 - lm1[:, -1]\n\n trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std)\n \n trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32)\n im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0)\n \n with torch.no_grad():\n full_coeff = self.net_recon(im_t)\n coeffs = split_coeff(full_coeff)\n\n pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs}\n \n pred_coeff = np.concatenate([\n pred_coeff['exp'], \n pred_coeff['angle'],\n pred_coeff['trans'],\n trans_params[2:][None],\n ], 1)\n video_coeffs.append(pred_coeff)\n full_coeffs.append(full_coeff.cpu().numpy())\n\n semantic_npy = np.array(video_coeffs)[:,0] \n\n savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]})\n\n return coeff_path, png_path, crop_info" }, { "identifier": "Audio2Coeff", "path": "src/test_audio2coeff.py", "snippet": "class Audio2Coeff():\n\n def __init__(self, sadtalker_path, device):\n #load config\n fcfg_pose = open(sadtalker_path['audio2pose_yaml_path'])\n cfg_pose = CN.load_cfg(fcfg_pose)\n cfg_pose.freeze()\n fcfg_exp = open(sadtalker_path['audio2exp_yaml_path'])\n cfg_exp = CN.load_cfg(fcfg_exp)\n cfg_exp.freeze()\n\n # load audio2pose_model\n self.audio2pose_model = Audio2Pose(cfg_pose, None, device=device)\n self.audio2pose_model = self.audio2pose_model.to(device)\n self.audio2pose_model.eval()\n for param in self.audio2pose_model.parameters():\n param.requires_grad = False \n \n try:\n if sadtalker_path['use_safetensor']:\n checkpoints = safetensors.torch.load_file(sadtalker_path['checkpoint'])\n self.audio2pose_model.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2pose'))\n else:\n load_cpk(sadtalker_path['audio2pose_checkpoint'], model=self.audio2pose_model, device=device)\n except:\n raise Exception(\"Failed in loading audio2pose_checkpoint\")\n\n # load audio2exp_model\n netG = SimpleWrapperV2()\n netG = netG.to(device)\n for param in netG.parameters():\n netG.requires_grad = False\n netG.eval()\n try:\n if sadtalker_path['use_safetensor']:\n checkpoints = safetensors.torch.load_file(sadtalker_path['checkpoint'])\n netG.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2exp'))\n else:\n load_cpk(sadtalker_path['audio2exp_checkpoint'], model=netG, device=device)\n except:\n raise Exception(\"Failed in loading audio2exp_checkpoint\")\n self.audio2exp_model = Audio2Exp(netG, cfg_exp, device=device, prepare_training_loss=False)\n self.audio2exp_model = self.audio2exp_model.to(device)\n for param in self.audio2exp_model.parameters():\n param.requires_grad = False\n self.audio2exp_model.eval()\n \n self.device = device\n\n def generate(self, batch, coeff_save_dir, pose_style, ref_pose_coeff_path=None):\n\n with torch.no_grad():\n #test\n results_dict_exp= 
self.audio2exp_model.test(batch)\n exp_pred = results_dict_exp['exp_coeff_pred'] #bs T 64\n\n #for class_id in range(1):\n #class_id = 0#(i+10)%45\n #class_id = random.randint(0,46) #46 styles can be selected \n batch['class'] = torch.LongTensor([pose_style]).to(self.device)\n results_dict_pose = self.audio2pose_model.test(batch) \n pose_pred = results_dict_pose['pose_pred'] #bs T 6\n\n pose_len = pose_pred.shape[1]\n if pose_len<13: \n pose_len = int((pose_len-1)/2)*2+1\n pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), pose_len, 2, axis=1)).to(self.device)\n else:\n pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), 13, 2, axis=1)).to(self.device) \n \n coeffs_pred = torch.cat((exp_pred, pose_pred), dim=-1) #bs T 70\n\n coeffs_pred_numpy = coeffs_pred[0].clone().detach().cpu().numpy() \n\n if ref_pose_coeff_path is not None: \n coeffs_pred_numpy = self.using_refpose(coeffs_pred_numpy, ref_pose_coeff_path)\n \n savemat(os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])), \n {'coeff_3dmm': coeffs_pred_numpy})\n\n return os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name']))\n \n def using_refpose(self, coeffs_pred_numpy, ref_pose_coeff_path):\n num_frames = coeffs_pred_numpy.shape[0]\n refpose_coeff_dict = loadmat(ref_pose_coeff_path)\n refpose_coeff = refpose_coeff_dict['coeff_3dmm'][:,64:70]\n refpose_num_frames = refpose_coeff.shape[0]\n if refpose_num_frames<num_frames:\n div = num_frames//refpose_num_frames\n re = num_frames%refpose_num_frames\n refpose_coeff_list = [refpose_coeff for i in range(div)]\n refpose_coeff_list.append(refpose_coeff[:re, :])\n refpose_coeff = np.concatenate(refpose_coeff_list, axis=0)\n\n #### relative head pose\n coeffs_pred_numpy[:, 64:70] = coeffs_pred_numpy[:, 64:70] + ( refpose_coeff[:num_frames, :] - refpose_coeff[0:1, :] )\n return coeffs_pred_numpy" }, { "identifier": "get_data", "path": "src/generate_batch.py", "snippet": "def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False, idlemode=False, length_of_audio=False, use_blink=True):\n\n syncnet_mel_step_size = 16\n fps = 25\n\n pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]\n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n\n \n if idlemode:\n num_frames = int(length_of_audio * 25)\n indiv_mels = np.zeros((num_frames, 80, 16))\n else:\n wav = audio.load_wav(audio_path, 16000) \n wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)\n wav = crop_pad_audio(wav, wav_length)\n orig_mel = audio.melspectrogram(wav).T\n spec = orig_mel.copy() # nframes 80\n indiv_mels = []\n\n for i in tqdm(range(num_frames), 'mel:'):\n start_frame_num = i-2\n start_idx = int(80. 
* (start_frame_num / float(fps)))\n end_idx = start_idx + syncnet_mel_step_size\n seq = list(range(start_idx, end_idx))\n seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]\n m = spec[seq, :]\n indiv_mels.append(m.T)\n indiv_mels = np.asarray(indiv_mels) # T 80 16\n\n ratio = generate_blink_seq_randomly(num_frames) # T\n source_semantics_path = first_coeff_path\n source_semantics_dict = scio.loadmat(source_semantics_path)\n ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)\n\n if ref_eyeblink_coeff_path is not None:\n ratio[:num_frames] = 0\n refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)\n refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]\n refeyeblink_num_frames = refeyeblink_coeff.shape[0]\n if refeyeblink_num_frames<num_frames:\n div = num_frames//refeyeblink_num_frames\n re = num_frames%refeyeblink_num_frames\n refeyeblink_coeff_list = [refeyeblink_coeff for i in range(div)]\n refeyeblink_coeff_list.append(refeyeblink_coeff[:re, :64])\n refeyeblink_coeff = np.concatenate(refeyeblink_coeff_list, axis=0)\n print(refeyeblink_coeff.shape[0])\n\n ref_coeff[:, :64] = refeyeblink_coeff[:num_frames, :64] \n \n indiv_mels = torch.FloatTensor(indiv_mels).unsqueeze(1).unsqueeze(0) # bs T 1 80 16\n\n if use_blink:\n ratio = torch.FloatTensor(ratio).unsqueeze(0) # bs T\n else:\n ratio = torch.FloatTensor(ratio).unsqueeze(0).fill_(0.) \n # bs T\n ref_coeff = torch.FloatTensor(ref_coeff).unsqueeze(0) # bs 1 70\n\n indiv_mels = indiv_mels.to(device)\n ratio = ratio.to(device)\n ref_coeff = ref_coeff.to(device)\n\n return {'indiv_mels': indiv_mels, \n 'ref': ref_coeff, \n 'num_frames': num_frames, \n 'ratio_gt': ratio,\n 'audio_name': audio_name, 'pic_name': pic_name}" }, { "identifier": "get_facerender_data", "path": "src/generate_facerender_batch.py", "snippet": "def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, \n batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, \n expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):\n\n semantic_radius = 13\n video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]\n txt_path = os.path.splitext(coeff_path)[0]\n\n data={}\n\n img1 = Image.open(pic_path)\n source_image = np.array(img1)\n source_image = img_as_float32(source_image)\n source_image = transform.resize(source_image, (size, size, 3))\n source_image = source_image.transpose((2, 0, 1))\n source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)\n source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)\n data['source_image'] = source_image_ts\n \n source_semantics_dict = scio.loadmat(first_coeff_path)\n generated_dict = scio.loadmat(coeff_path)\n\n if 'full' not in preprocess.lower():\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n else:\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)\n source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)\n source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)\n data['source_semantics'] = source_semantics_ts\n\n # target \n generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale\n\n if 'full' in preprocess.lower():\n generated_3dmm = np.concatenate([generated_3dmm, 
np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)\n\n if still_mode:\n generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)\n\n with open(txt_path+'.txt', 'w') as f:\n for coeff in generated_3dmm:\n for i in coeff:\n f.write(str(i)[:7] + ' '+'\\t')\n f.write('\\n')\n\n target_semantics_list = [] \n frame_num = generated_3dmm.shape[0]\n data['frame_num'] = frame_num\n for frame_idx in range(frame_num):\n target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)\n target_semantics_list.append(target_semantics)\n\n remainder = frame_num%batch_size\n if remainder!=0:\n for _ in range(batch_size-remainder):\n target_semantics_list.append(target_semantics)\n\n target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1\n target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])\n data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)\n data['video_name'] = video_name\n data['audio_path'] = audio_path\n \n if input_yaw_list is not None:\n yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)\n data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)\n if input_pitch_list is not None:\n pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)\n data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)\n if input_roll_list is not None:\n roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) \n data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)\n \n return data" }, { "identifier": "init_path", "path": "src/utils/init_path.py", "snippet": "def init_path(checkpoint_dir, config_dir, size=512, old_version=False, preprocess='crop'):\n\n if old_version:\n #### load all the checkpoint of `pth`\n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n use_safetensor = False\n elif len(glob.glob(os.path.join(checkpoint_dir, '*.safetensors'))):\n print('using safetensor as default')\n sadtalker_paths = {\n \"checkpoint\":os.path.join(checkpoint_dir, 'SadTalker_V0.0.2_'+str(size)+'.safetensors'),\n }\n use_safetensor = True\n else:\n print(\"WARNING: The new version of the model will be updated by safetensor, you may need to download it mannully. 
We run the old version of the checkpoint this time!\")\n use_safetensor = False\n \n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n sadtalker_paths['dir_of_BFM_fitting'] = os.path.join(config_dir) # , 'BFM_Fitting'\n sadtalker_paths['audio2pose_yaml_path'] = os.path.join(config_dir, 'auido2pose.yaml')\n sadtalker_paths['audio2exp_yaml_path'] = os.path.join(config_dir, 'auido2exp.yaml')\n sadtalker_paths['use_safetensor'] = use_safetensor # os.path.join(config_dir, 'auido2exp.yaml')\n\n if 'full' in preprocess:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00109-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender_still.yaml')\n else:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00229-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender.yaml')\n\n return sadtalker_paths" } ]
from glob import glob
from time import strftime
from argparse import ArgumentParser
from src.facerender.animate_onnx import AnimateFromCoeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
from src.utils.init_path import init_path
from src.utils.preprocess import CropAndExtract
from src.test_audio2coeff import Audio2Coeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
from src.utils.init_path import init_path
from src.face3d.visualize import gen_composed_video
import shutil
import torch
import os, sys, time
import base64
13,229
# from src.facerender.animate import AnimateFromCoeff

def main(args):
    #torch.backends.cudnn.enabled = False
    # tts_service = os.getenv("TTS_SERVER")
    facerender_batch_size = 10
    startInference = time.time()
    pic_path = args.source_image
    audio_path = args.driven_audio
    save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
    os.makedirs(save_dir, exist_ok=True)
    pose_style = args.pose_style
    device = args.device
    batch_size = args.batch_size
    input_yaw_list = args.input_yaw
    input_pitch_list = args.input_pitch
    input_roll_list = args.input_roll
    ref_eyeblink = args.ref_eyeblink
    ref_pose = args.ref_pose

    current_root_path = os.path.split(sys.argv[0])[0]

    sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess)

    #init model
    preprocess_model = CropAndExtract(sadtalker_paths, device)
    audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
    animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)

    #crop image and extract 3dmm from image
    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    print('3DMM Extraction for source image')
    first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\
        source_image_flag=True, pic_size=args.size)
    if first_coeff_path is None:
        print("Can't get the coeffs of the input")
        return

    if ref_eyeblink is not None:
        ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
        ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
        os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
        print('3DMM Extraction for the reference video providing eye blinking')
        ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_eyeblink_coeff_path=None

    if ref_pose is not None:
        if ref_pose == ref_eyeblink:
            ref_pose_coeff_path = ref_eyeblink_coeff_path
        else:
            ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
            ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
            os.makedirs(ref_pose_frame_dir, exist_ok=True)
            print('3DMM Extraction for the reference video providing pose')
            ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_pose_coeff_path=None

    #audio2ceoff
    batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
    coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)

    # 3dface render
    if args.face3dvis:
        gen_composed_video(args, device, first_coeff_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))

    #coeff2video
# from src.facerender.animate import AnimateFromCoeff

def main(args):
    #torch.backends.cudnn.enabled = False
    # tts_service = os.getenv("TTS_SERVER")
    facerender_batch_size = 10
    startInference = time.time()
    pic_path = args.source_image
    audio_path = args.driven_audio
    save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
    os.makedirs(save_dir, exist_ok=True)
    pose_style = args.pose_style
    device = args.device
    batch_size = args.batch_size
    input_yaw_list = args.input_yaw
    input_pitch_list = args.input_pitch
    input_roll_list = args.input_roll
    ref_eyeblink = args.ref_eyeblink
    ref_pose = args.ref_pose

    current_root_path = os.path.split(sys.argv[0])[0]

    sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess)

    #init model
    preprocess_model = CropAndExtract(sadtalker_paths, device)
    audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
    animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)

    #crop image and extract 3dmm from image
    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    print('3DMM Extraction for source image')
    first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\
        source_image_flag=True, pic_size=args.size)
    if first_coeff_path is None:
        print("Can't get the coeffs of the input")
        return

    if ref_eyeblink is not None:
        ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
        ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
        os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
        print('3DMM Extraction for the reference video providing eye blinking')
        ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_eyeblink_coeff_path=None

    if ref_pose is not None:
        if ref_pose == ref_eyeblink:
            ref_pose_coeff_path = ref_eyeblink_coeff_path
        else:
            ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
            ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
            os.makedirs(ref_pose_frame_dir, exist_ok=True)
            print('3DMM Extraction for the reference video providing pose')
            ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_pose_coeff_path=None

    #audio2ceoff
    batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
    coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style, ref_pose_coeff_path)

    # 3dface render
    if args.face3dvis:
        gen_composed_video(args, device, first_coeff_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))

    #coeff2video
data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
7
2023-11-25 06:53:12+00:00
16k
microsoft/Project-BayesDAG
src/causica/models/bayesdag/bayesdag_linear.py
[ { "identifier": "Variables", "path": "src/causica/datasets/variables.py", "snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[Variable]] = None,\n used_cols: Optional[List[int]] = None,\n ) -> None:\n \"\"\"\n Args:\n variables: A list Variable objects.\n auxiliary_variables: A list of Variable objects only used for input into VAE,\n not produced in output.\n These are assumed to be appended onto the end of the variables in the data.\n Defaults to None - no aux variables present.\n used_cols: A list of column ids that were used when processing the original data.\n \"\"\"\n if not auxiliary_variables:\n auxiliary_variables = []\n self.auxiliary_variables = auxiliary_variables\n self._variables = variables\n\n self._deduplicate_names()\n\n # Dictionary mapping from variable name to variable index.\n self.name_to_idx = {var.name: idx for idx, var in enumerate(self._variables)}\n\n # Lists containing query and target variable indices\n self.target_var_idxs = []\n self.not_target_var_idxs = []\n self.query_var_idxs = []\n self.not_query_var_idxs = []\n for idx, var in enumerate(self._variables):\n if var.query:\n self.query_var_idxs.append(idx)\n else:\n self.not_query_var_idxs.append(idx)\n if var.target:\n self.target_var_idxs.append(idx)\n else:\n self.not_target_var_idxs.append(idx)\n\n if len(self.target_var_idxs) > 0 and all(idx in self.query_var_idxs for idx in self.target_var_idxs):\n warnings.warn(\n \"All target variables are marked as queriable, it is likely that active learning will always \"\n \"select these variables first.\"\n )\n\n # Lists containing continuous (including text) and binary/categorical variable indices\n self.var_idxs_by_type: DefaultDict[str, List[int]] = defaultdict(list)\n for idx, var in enumerate(self._variables + self.auxiliary_variables):\n self.var_idxs_by_type[var.type_].append(idx)\n\n # List of lists, where self.unprocessed_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data.\n self.unprocessed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.unprocessed_non_aux_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data (non-auxiliary).\n self.unprocessed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_cols[i] gives the columns occupied by the ith variable in the processed\n # data.\n self.processed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.processed_dim\n self.processed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_non_aux_cols[i] gives the columns occupied by the ith variable in the processed\n # data (non-auxiliary).\n self.processed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.processed_dim\n self.processed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # Set of all query group names, maintaining order in which they are first encountered when iterating through\n # the variables list. 
This is the simplest way to do this since dictionaries are guaranteed to be\n # insertion-ordered since Python 3.7\n self.group_names = list(dict.fromkeys([var.group_name for var in self._variables]))\n\n # List containing indices for each query group, where the query group names are assumed to be in the same order\n # as self.group_names\n self.group_idxs = [\n [idx for idx, var in enumerate(self._variables) if var.group_name == group_name]\n for group_name in self.group_names\n ]\n\n # Remove groups containing no queriable variables from self.group_names and self.group_idxs, as\n # we can guarantee that we will never query these groups.\n is_group_queriable = [any(self._variables[idx].query for idx in idxs) for idxs in self.group_idxs]\n\n self.group_names = [name for group_idx, name in enumerate(self.group_names) if is_group_queriable[group_idx]]\n self.group_idxs = [idxs for group_idx, idxs in enumerate(self.group_idxs) if is_group_queriable[group_idx]]\n\n # Save the list of observed column ids\n default_used_cols = list(range(len(self._variables) + len(auxiliary_variables))) # All columns observed\n self.used_cols = used_cols if used_cols is not None else default_used_cols\n assert len(self.used_cols) == len(self._variables) + len(self.auxiliary_variables)\n\n self.col_id_to_var_index = {old: new for new, old in enumerate(self.used_cols)}\n\n def __repr__(self):\n return str(self._variables)\n\n def __iter__(self) -> Iterator[Variable]:\n \"\"\"\n Iterate through the variables within the container.\n Note - Now it iterate through all the variables within the container\n (including auxiliary variables, if they are present)\n \"\"\"\n for var in self._all_variables:\n yield var\n\n def __getitem__(self, idx):\n return (self._all_variables)[idx]\n\n def __len__(self) -> int:\n return len(self._variables) + len(self.auxiliary_variables)\n\n @classmethod\n def create_from_json(cls, path: str) -> Variables:\n return cls.create_from_dict(read_json_as(path, dict))\n\n @classmethod\n def create_from_dict(cls, variables_dict: Dict[str, List[Any]]) -> Variables:\n \"\"\"\n Create variables object from a dictionary\n \"\"\"\n variables = variables_dict[\"variables\"]\n for var in variables:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n var_obj_list = [Variable(**var) for var in variables]\n\n auxiliary_vars = variables_dict.get(\"auxiliary_variables\", [])\n if len(auxiliary_vars) == 0:\n auxiliary_vars_obj = None\n else:\n for var in auxiliary_vars:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n\n auxiliary_vars_obj = [Variable(**var) for var in auxiliary_vars]\n\n used_cols = variables_dict.get(\"used_cols\", None)\n\n return cls(var_obj_list, auxiliary_vars_obj, used_cols)\n\n @classmethod\n def create_from_data_and_dict(\n cls, data: np.ndarray, mask: np.ndarray, variables_dict: Optional[Dict[str, Any]] = None\n ) -> Variables:\n \"\"\"\n Create variables object from an input dictionary, inferring missing fields using `data` and `mask`.\n \"\"\"\n # Infer missing fields in variables_dict\n variables_dict = cls.infer_from_data(data, mask, variables_dict, True)\n variables = cls.create_from_dict(variables_dict)\n return variables\n\n @staticmethod\n def _metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n ) -> Tuple[List[Any], Union[List[Any], None]]:\n \"\"\"\n Infer variables_metadata from input data\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for 
unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n variables_type: is it aux variables, or normal variables\n Returns:\n varaibles_metadata: inferred metadata from input data\n A list of column ids that were used when processing the original data.\n \"\"\"\n\n variables_metadata = []\n # Use None rather than {} as default since mutable default args are dangerous in Python.\n used_cols = variables_dict.get(\"used_cols\", None)\n if used_cols:\n used_cols = cast(List[int], used_cols)\n assert len(used_cols) == data.shape[1]\n\n for idx, variable_metadata in enumerate(variables_dict[variables_type]):\n if not all(\n k in variable_metadata for k in [\"name\", \"type\", \"lower\", \"upper\", \"query\", \"target\", \"always_observed\"]\n ):\n # If variable metadata fully specified, do not try to infer, as doing column indexing can be expensive\n # for CSR sparse matrices.\n var_data = data[:, idx]\n var_mask = mask[:, idx]\n if issparse(var_data):\n var_data = var_data.toarray()\n var_mask = var_mask.toarray()\n\n if \"name\" not in variable_metadata:\n if used_cols:\n variable_metadata[\"name\"] = str(used_cols[idx])\n else:\n variable_metadata[\"name\"] = f\"Column {idx}\"\n\n # If data type/min max/num categories specified explicitly, overwrite variables file\n if \"type\" not in variable_metadata:\n # Test if all unmasked elements are integers\n\n if np.all((var_data * var_mask) // 1 == var_data * var_mask):\n if (var_data * var_mask).max() <= 1:\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as binary. This can be '\n \"changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"binary\"\n else:\n # Note that we always infer integer values with a max value > 1 as categorical. This may want to be\n # reconsidered if support for ordinal variables is introduced at a later date.\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as categorical. This can be'\n \" changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"categorical\"\n else:\n variable_metadata[\"type\"] = \"continuous\"\n\n if \"lower\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_lower = 0\n else:\n inferred_lower = min(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"lower\"] = inferred_lower\n print(\n f'Minimum value of variable {variable_metadata[\"name\"]} inferred as {inferred_lower}. This'\n \" can be changed manually in the dataset's variables.json file\"\n )\n\n if \"upper\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_upper = 1\n else:\n inferred_upper = max(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"upper\"] = inferred_upper\n print(\n f'Max value of variable {variable_metadata[\"name\"]} inferred as {inferred_upper}. This can '\n \"be changed manually in the dataset's variables.json file\"\n )\n\n if \"query\" not in variable_metadata:\n # By default, assume all variables can be queried unless specified otherwise.\n if variables_type == \"auxiliary_variables\":\n variable_metadata[\"query\"] = False\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a non-queriable variable. 
'\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n else:\n variable_metadata[\"query\"] = True\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n\n if \"target\" not in variable_metadata:\n # By default, assume variable is a target if and only if it is not queriable.\n variable_metadata[\"target\"] = not variable_metadata[\"query\"]\n fill_string = \"not \" if not variable_metadata[\"target\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an active learning target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"target\" field.'\n )\n\n if \"always_observed\" not in variable_metadata:\n # By default, assume variable is always observed if there is no missing in the mask.\n if np.sum((var_mask - 1) ** 2) == 0:\n variable_metadata[\"always_observed\"] = True\n else:\n variable_metadata[\"always_observed\"] = False\n fill_string = \"not \" if not variable_metadata[\"always_observed\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an always observed target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"always_observed\" field.'\n )\n\n variables_metadata.append(variable_metadata)\n\n return variables_metadata, used_cols\n\n @staticmethod\n def infer_from_data(data, mask, variables_dict=None, infer_aux_variables=False) -> Dict[str, List[Any]]:\n \"\"\"\n Infer missing values in an input variables dictionary, using the input data.\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. 
Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n infer_aux_variables: infer auxiliary variables for GINA or not.\n Returns:\n variables_dict: Updated version of the input variables_dict, with missing variables and fields inferred from the\n data.\n \"\"\"\n\n if variables_dict is None:\n variables_dict = {}\n\n # NOTE this assumes all variables have only one column in unprocessed data, which should always be the case when\n # inferring from a dataset.\n if \"auxiliary_variables\" not in variables_dict:\n variables_dict[\"auxiliary_variables\"] = []\n\n if \"variables\" not in variables_dict or variables_dict[\"variables\"] == []:\n num_var_cols = data.shape[1] - len(variables_dict[\"auxiliary_variables\"])\n variables_dict[\"variables\"] = [{} for _ in range(num_var_cols)]\n\n variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": variables_dict[\"auxiliary_variables\"],\n \"used_cols\": used_cols,\n }\n if infer_aux_variables:\n aux_variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"auxiliary_variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": aux_variables_metadata,\n \"used_cols\": used_cols,\n }\n\n return variables_dict\n\n @property\n def _all_variables(self):\n return self._variables + self.auxiliary_variables\n\n @property\n def has_auxiliary(self) -> bool:\n \"\"\"\n True if there are aux variables present.\n \"\"\"\n return len(self.auxiliary_variables) > 0\n\n @property\n def binary_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all binary variables.\n \"\"\"\n return self.var_idxs_by_type[\"binary\"]\n\n @property\n def categorical_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all categorical variables.\n \"\"\"\n return self.var_idxs_by_type[\"categorical\"]\n\n @property\n def discrete_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all discrete (i.e. binary or categorical) variables. We sort to ensure that the\n combined list is in ascending order.\n \"\"\"\n return sorted(self.var_idxs_by_type[\"categorical\"] + self.var_idxs_by_type[\"binary\"])\n\n @property\n def continuous_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all continuous variables.\n \"\"\"\n return self.var_idxs_by_type[\"continuous\"]\n\n @property\n def text_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all text variables.\n \"\"\"\n return self.var_idxs_by_type[\"text\"]\n\n @property\n def non_text_idxs(self) -> List[bool]:\n \"\"\"Helper method. Returns list of booleans, where an element\n at index i indicates whether a variable at index i is non-text or not\n e.g. 
For Variables object of [...\"continous\"..., ...\"text\"..., \"continuous\"],\n the result would be [True, False, True]\n \"\"\"\n unproc_cols_by_type = self.unprocessed_cols_by_type\n if \"text\" not in unproc_cols_by_type:\n return [True for _ in range(len(self))]\n return (~np.in1d(range(len(self)), unproc_cols_by_type[\"text\"])).tolist()\n\n @property\n def num_unprocessed_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_cols)\n\n @property\n def num_unprocessed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_non_aux_cols)\n\n @property\n def num_processed_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_cols)\n\n @property\n def num_processed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_non_aux_cols)\n\n @property\n def num_groups(self) -> int:\n \"\"\"\n Return the number of unique query groups in the variables object.\n \"\"\"\n return len(self.group_names)\n\n @property\n def group_mask(self) -> np.ndarray:\n \"\"\"\n Return a mask of shape (num_groups, num_processed_cols) indicating which column\n corresponds to which group.\n \"\"\"\n mask = np.zeros((self.num_groups, self.num_processed_cols), dtype=bool)\n for group_idx, group in enumerate(self.group_idxs):\n for var in group:\n for proc_col in self.processed_cols[var]:\n mask[group_idx, proc_col] = 1\n return mask\n\n @property\n def proc_always_observed_list(self) -> List[Optional[bool]]:\n \"\"\"\n The mask that indicates if the variable is always observed (for processed data)\n \"\"\"\n return sum(([var.always_observed] * var.processed_dim for var in self._all_variables), [])\n\n @property\n def processed_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data associated with each variable of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._all_variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def processed_non_aux_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data (w/o aux variables) associated with each\n variable of that type.\n E.g. 
for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def unprocessed_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._all_variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n @property\n def unprocessed_non_aux_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n def subset(self, idxs: List[int], auxiliary_idxs: Optional[List[int]] = None) -> Variables:\n \"\"\"\n Returns a new Variables object containing only the Variable objects whose indices are given in `idxs`.\n Note that this currently ignores metadata variables.\n \"\"\"\n if auxiliary_idxs is None:\n auxiliary_idxs = []\n\n variables_list = [self._variables[idx] for idx in idxs]\n auxiliary_variables_list = [self.auxiliary_variables[idx] for idx in auxiliary_idxs]\n return Variables(variables_list, auxiliary_variables_list)\n\n def to_dict(self) -> Dict[str, Any]:\n variables_list = [var.to_json() for var in self._variables]\n if self.auxiliary_variables is None:\n auxiliary_vars_list = []\n else:\n auxiliary_vars_list = [var.to_json() for var in self.auxiliary_variables]\n\n variables_json = {\n \"variables\": variables_list,\n \"auxiliary_variables\": auxiliary_vars_list,\n \"used_cols\": [int(col) for col in self.used_cols],\n }\n return variables_json\n\n def save(self, path: str) -> None:\n variables_json = self.to_dict()\n save_json(variables_json, path)\n\n def as_list(self) -> List[Variable]:\n return self._variables\n\n def get_idxs_from_name_list(self, variable_names: List[Union[str, int]]) -> np.ndarray:\n \"\"\"\n Get a binary array of shape (variable_count,), where for each index the array value is 1 if the corresponding\n variable is named in `variable_names`, and 0 otherwise.\n \"\"\"\n variables_to_query = np.zeros((len(self._variables),))\n # Look up indices of specified variables and mark as queriable.\n for variable_name in variable_names:\n # Cast name to string in case numeric names (e.g. 
question ids) have been input as integers.\n variable_name = str(variable_name)\n variable_idx = self.name_to_idx[variable_name]\n variables_to_query[variable_idx] = 1\n\n return variables_to_query\n\n def get_observable_groups(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of indices for groups that are still observable in the current row\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n list of indices of groups that can be observed, where the indices correspond to the corresponding group\n names in `self.group_names`.\n \"\"\"\n observable_variables_idxs = self.get_observable_variable_idxs(data_mask_row, obs_mask_row)\n observable_groups_idxs: List[int] = []\n for group_idx, idxs in enumerate(self.group_idxs):\n if any(i in observable_variables_idxs for i in idxs):\n observable_groups_idxs.append(group_idx)\n return observable_groups_idxs\n\n def get_observable_variable_idxs(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of variable idxs for variables that are still observable in the current row.\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n observable_vars: List of indices of variables that can be observed.\n \"\"\"\n if data_mask_row.ndim != 1:\n raise ValueError(f\"Test mask should be 1D, had {data_mask_row.ndim} dims and shape {data_mask_row.shape}.\")\n if obs_mask_row.ndim != 1:\n raise ValueError(\n f\"Observation mask should be 1D, had {obs_mask_row.ndim} dims and shape {obs_mask_row.shape}.\"\n )\n if len(obs_mask_row) != len(data_mask_row) or len(data_mask_row) != len(self._variables):\n # One likely cause is accidentally passing 'processed' masks, which may be longer\n # if some variables are categorical.\n raise ValueError(\n f\"Lengths of obs_mask_row {len(obs_mask_row)}, data_mask_row {len(data_mask_row)}, \"\n f\"and variables list {len(self._variables)} should all be the same.\"\n )\n # Get ids where there is an underlying data value (test_mask == 1) and that we haven't yet queried (obs_mask == 0)\n unobserved_idxs = np.where((data_mask_row == 1) & (obs_mask_row == 0))[0]\n\n # Intersection of these and query_var_idxs.\n observable_idx_set = set(unobserved_idxs).intersection(set(self.query_var_idxs))\n return list(observable_idx_set)\n\n def get_var_cols_from_data(self, var_idx, data):\n \"\"\"\n Get data from an array for a single variable only.\n\n Args:\n var_idx: Index of variable we want data for.\n data (shape (batch_size, variable_count)): Array to get variable info from.\n\n Returns:\n var_data (shape (observed_count, processed_dim)): Values only for\n the corresponding variable.\n \"\"\"\n return data[:, self.processed_cols[var_idx]]\n\n def get_variables_to_observe(self, data_mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Return a boolean tensor of length num_variables, where each element indicates whether the corresponding variable\n can be queried during active learning (i.e. 
the variable is queriable and has at least one observed value in\n the data).\n Args:\n data_mask (shape (batch_size, num_processed_cols)): Processed mask\n\n Returns:\n torch.Tensor (shape (variable_count,)): True where it's a query-able variable and we have at least one\n observed value\n \"\"\"\n cols_with_data = data_mask.sum(dim=0).to(torch.bool)\n\n # data_mask may have multiple columns for a single variable, if it's a categorical variable. Pick first entry per variable\n ii = torch.tensor([cols[0] for cols in self.processed_cols], dtype=torch.long, device=cols_with_data.device)\n cols_with_data = torch.index_select(cols_with_data, 0, ii)\n is_query_id = torch.zeros(len(self), dtype=torch.bool, device=cols_with_data.device)\n is_query_id[\n tuple(self.query_var_idxs),\n ] = True\n return is_query_id * cols_with_data\n\n def _deduplicate_names(self):\n # Produce warning if var name is reused and add an increasing integer to the end until it is unique.\n var_names = set()\n for var in self._all_variables:\n i = 2\n original_name = var.name\n while var.name in var_names:\n new_name = f\"{original_name}_{i}\"\n var.name = new_name\n i += 1\n if var.name != original_name:\n # Do the warning in a separate block to the while loop so that we only raise one warning if we have to\n # try appending several different integers to the name.\n warnings.warn(\n f\"Name {original_name} has already been used, renaming to {var.name}\",\n UserWarning,\n )\n var_names.add(var.name)\n\n # TODO: Maybe create Variables.Utils for methods like the below one\n @staticmethod\n def create_empty_data(variables: Variables) -> np.ndarray:\n var_count = len(variables)\n empty_data = np.zeros((1, var_count), dtype=object)\n for i in range(var_count):\n if variables[i].type_ == \"text\":\n empty_data[:, i] = \"empty str\"\n return empty_data" }, { "identifier": "BayesDAGNonLinear", "path": "src/causica/models/bayesdag/bayesdag_nonlinear.py", "snippet": "class BayesDAGNonLinear(BayesDAG):\n \"\"\"\n Approximate Bayesian inference over the graph in a Gaussian nonlinear ANM based on the BayesDAG result. Any DAG G is represented as G = W * Step(grad (p))\n where W is a discrete matrix W in {0, 1} ^ {d, d} and p in R^d. Inference over DAGs G then corresponds to inference over W and p\n as the transformation is determinstic. 
This can be converted to inference over W and p by using the Gumbel-Sinkhorn trick.\n \"\"\"\n\n def __init__(\n self,\n model_id: str,\n variables: Variables,\n save_dir: str,\n device: torch.device,\n lambda_sparse: float = 1.0,\n norm_layers: bool = False,\n res_connection: bool = False,\n num_chains: int = 10,\n sinkhorn_n_iter: int = 3000,\n scale_noise: float = 0.1,\n scale_noise_p: float = 1.0,\n model_type: str = \"nonlinear\",\n sparse_init: bool = False,\n input_perm: bool = False,\n VI_norm: bool = False,\n ):\n \"\"\"\n Args:\n model_id: Unique model ID for this model instance of training.\n variables: Information about variables/features used by this model.\n save_dir: Location to save any information about this model, including training data.\n device: Device to load model to.\n lambda_sparse: Coefficient for the prior term that enforces sparsity.\n norm_layers: bool indicating whether all MLPs should use layer norm\n res_connection: bool indicating whether all MLPs should use layer norm\n num_chains: Number of chains to use for SG-MCMC\n sinkhorn_n_iter: Number of iterations for Sinkhorn\n scale_noise: Hyperparameter of the Adam SGMCMC for sampling theta\n scale_noise_p: Hyperparameter of the Adam SGMCMC for sampling p\n model_type: Type of model to use. Admits {\"linear\", \"nonlinear\"}\n sparse_init: Whether to initialize the W matrix to be sparse\n input_perm: Whether to use the input permutation to generate the adjacency matrix\n VI_norm: Whether to use layer norm in the helper network\n \"\"\"\n super().__init__(\n model_id=model_id,\n variables=variables,\n save_dir=save_dir,\n device=device,\n lambda_sparse=lambda_sparse,\n base_distribution_type=\"gaussian\",\n norm_layers=norm_layers,\n res_connection=res_connection,\n num_chains=num_chains,\n scale_noise=scale_noise,\n scale_noise_p=scale_noise_p,\n model_type=model_type,\n )\n self.input_perm = input_perm\n self.sparse_init = sparse_init\n self.VI_norm = VI_norm\n if self.sparse_init:\n self.logit_const = -1\n else:\n self.logit_const = 0\n \n if self.input_perm:\n hidden_size = 128\n layer_norm = partial(torch.nn.LayerNorm, elementwise_affine=True)\n self.helper_network = generate_fully_connected(\n input_dim=self.num_nodes*self.num_nodes,\n output_dim=(self.num_nodes * self.num_nodes),\n hidden_dims=[hidden_size, hidden_size],\n non_linearity=nn.ReLU,\n activation=None,\n device=device,\n res_connection=True,\n normalization=layer_norm\n )\n else:\n layer_norm = partial(torch.nn.LayerNorm, elementwise_affine=True)\n hidden_size = 48\n self.helper_network = generate_fully_connected(\n input_dim=self.num_nodes,\n output_dim=(self.num_nodes * self.num_nodes),\n hidden_dims=[hidden_size, hidden_size],\n non_linearity=nn.ReLU,\n activation=None,\n device=device,\n res_connection=True,\n normalization=layer_norm if self.VI_norm else None\n )\n \n if self.VI_norm:\n self.layer_norm = nn.LayerNorm(self.num_nodes, elementwise_affine=False)\n else:\n self.layer_norm = lambda x: x\n \n self.num_chains = num_chains\n self.o_scale = 10\n self.p_scale = 0.01\n self.p_buffer = deque(maxlen=5000)\n self.weights_buffer = deque(maxlen=5000)\n self.buffers_buffer = deque(maxlen=5000)\n self.p_steps = 0\n self.weights_steps = 0\n self.sinkhorn_n_iter = sinkhorn_n_iter\n self.num_burnin_steps = 1\n self.p = self.p_scale * torch.randn((self.num_chains, self.num_nodes), device=self.device)\n self.p.requires_grad = True\n\n self.p_opt = Adam_SGMCMC([self.p],\n lr=0.0003,\n betas=(0.9,0.99),\n dataset_size=5000,\n 
scale_noise=self.scale_noise_p,\n )\n #\n self.W_opt = torch.optim.Adam(list(self.helper_network.parameters())+[self.likelihoods[\"continuous\"].logscale_base] , lr=0.005)\n self.weights_opt = Adam_SGMCMC(self.icgnn_params,\n lr=0.0003,\n betas=(0.9,0.99),\n dataset_size=5000,\n scale_noise=self.scale_noise,\n )\n\n @classmethod\n def name(cls) -> str:\n return \"bayesdag_nonlinear\"\n\n def compute_perm_hard(self, p:torch.Tensor):\n def log_sinkhorn_norm(log_alpha: torch.Tensor, tol= 1e-3):\n for _ in range(self.sinkhorn_n_iter):\n log_alpha = log_alpha - torch.logsumexp(log_alpha, -1, keepdim=True)\n log_alpha = log_alpha - torch.logsumexp(log_alpha, -2, keepdim=True)\n exp_log_alpha = log_alpha.exp()\n if torch.abs(1.-exp_log_alpha.sum(-1)).max()<tol and torch.abs(1.-exp_log_alpha.sum(-2)).max()<tol:\n break\n return log_alpha.exp()\n\n O = self.o_scale * torch.arange(1, self.num_nodes+1, dtype=p.dtype, device=p.device).expand(1, -1)\n X = torch.matmul(p.unsqueeze(-1), O.unsqueeze(-2))\n \n perm = log_sinkhorn_norm(X / 0.2)\n \n perm_matrix = torch.zeros_like(perm)\n for i in range(perm.shape[0]):\n row_ind, col_ind = linear_sum_assignment(-perm[i].squeeze().cpu().detach().numpy())\n perm_indices = list(zip(row_ind, col_ind)) \n perm_indices = [(i,) + idx for idx in perm_indices]\n perm_indices = tuple(zip(*perm_indices))\n perm_matrix[perm_indices] = 1.0\n perm_matrix_hard = (perm_matrix - perm).detach() + perm # Straight Through\n return perm_matrix_hard, perm\n \n def transform_adj(self, p: torch.Tensor, detach_W: bool = False):\n \"\"\"\n Takes in p and returns the adjacency matrix G = W * Step(grad (p)), equivalent to doing G = W* [sigma(p) x L x sigma(p)^T]\n See Theorem 3.2 in https://arxiv.org/abs/2307.13917.\n Args:\n p: Tensor of shape (num_chains, num_nodes) representing the p vector\n detach_W: Whether to detach the W matrix from the computation graph\n \"\"\"\n perm_matrix_hard, perm = self.compute_perm_hard(p)\n \n if self.input_perm:\n helper_input = perm_matrix_hard.view(perm_matrix_hard.shape[0],-1)\n else:\n helper_input = self.layer_norm(p)\n\n W_vec_ = torch.distributions.RelaxedBernoulli(logits=self.helper_network(helper_input)+self.logit_const, temperature=0.2).rsample()\n if detach_W:\n W_vec_.detach()\n W_vec_hard = W_vec_.round()\n W_vec = (W_vec_hard - W_vec_).detach() + W_vec_\n W = W_vec.reshape(perm.shape[0], self.num_nodes, self.num_nodes)\n full_lower = torch.ones(perm.shape[0], int((self.num_nodes - 1) * self.num_nodes / 2)).to(p.device)\n adj_matrix = W * torch.matmul(\n torch.matmul(perm_matrix_hard, fill_triangular(full_lower)), perm_matrix_hard.transpose(-1, -2)\n )\n\n return adj_matrix\n \n def extract_icgnn_weights(self, use_param_weights, num_particles):\n if use_param_weights or len(self.weights_buffer)==0:\n params, buffers = self.icgnn_params, self.icgnn_buffers\n else:\n tuple_tensor=tuple(self.weights_buffer.pop() for _ in range(num_particles))\n params = transpose_stack(tuple_tensor)\n tuple_tensor=tuple(self.buffers_buffer.pop() for _ in range(num_particles))\n buffers = transpose_stack(tuple_tensor)\n return params, buffers\n\n def data_likelihood(self, X: torch.Tensor, A_samples: torch.Tensor, dataset_size:int, use_param_weights=False, return_prior: bool = False):\n \"\"\"\n Computes the log likelihood of the data under the model, i.e. 
p(X | G:={w,p}, theta, sigma)\n Args:\n X: Tensor of shape (batch_size, num_nodes) representing the data\n A_samples: Tensor of shape (num_chains, num_nodes, num_nodes) representing the adjacency matrix\n return_prior: Whether to return the prior term as well\n \"\"\"\n params, buffers = self.extract_icgnn_weights(use_param_weights=use_param_weights, num_particles=A_samples.shape[0])\n params_flat = torch.cat([param.view(A_samples.shape[0],-1) for param in params], dim=-1)\n if return_prior:\n theta_prior = 1/ dataset_size * (torch.distributions.normal.Normal(loc=torch.tensor(0.), scale=torch.tensor(1.)).log_prob(\n params_flat) ).sum(-1) # num_chains\n p_prior = 1/dataset_size * (torch.distributions.normal.Normal(loc=torch.tensor(0.), scale=torch.tensor(0.1)).log_prob(self.p)).sum(-1) # chain\n\n sparse_loss = -(1/dataset_size) * self.lambda_sparse* A_samples.abs().sum(-1).sum(-1) # chain\n\n W_adj = A_samples * vmap(self.ICGNN.get_weighted_adjacency)(params, buffers)\n predict = vmap(self.ICGNN.predict, in_dims=(0, 0, None, 0))(params, buffers, X, W_adj)# N x num_chain x D #chain x N x 1 x D\n\n log_p_base = self._log_prob(\n X, predict, W=A_samples if self.base_distribution_type == \"conditional_spline\" else None\n ).transpose(0,1) # N x num_chains\n if return_prior:\n return log_p_base, theta_prior, p_prior, sparse_loss\n\n return log_p_base\n\n def compute_W_prior_entropy(self, p: torch.Tensor, dataset_size: int):\n \"\"\"\n Computes the prior and entropy terms for the W matrix (for VI).\n Args:\n p: Tensor of shape (num_chains, num_nodes) representing the p vector\n dataset_size: Size of the dataset\n \"\"\"\n if self.input_perm:\n perm_matrix_hard, _ = self.compute_perm_hard(p)\n helper_input = perm_matrix_hard.view(perm_matrix_hard.shape[0],-1)\n logits = self.helper_network(helper_input)\n else:\n logits = self.helper_network(self.layer_norm(p)) # chain x( D x D)\n\n # draw hard samples\n W_vec_ = torch.distributions.RelaxedBernoulli(logits=logits+self.logit_const, temperature=0.2).rsample()\n W_vec_hard = W_vec_.round()\n W_vec = (W_vec_hard - W_vec_).detach() + W_vec_\n W = W_vec.reshape(p.shape[0], self.num_nodes, self.num_nodes) # chain x D x D\n prior = 1./dataset_size*torch.distributions.Bernoulli(probs=torch.tensor(0.5).to(device=W.device)).log_prob(W).sum(-1).sum(-1) # chain\n # compute entropy\n entropy = 1./dataset_size*torch.distributions.Bernoulli(logits=logits).entropy().sum(-1).sum(-1) # chain\n return prior, entropy\n\n def process_dataset(\n self,\n dataset: Dataset,\n train_config_dict: Optional[Dict[str, Any]] = None,\n variables: Optional[Variables] = None,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Generates the training data and mask.\n Args:\n dataset: Dataset to use.\n train_config_dict: Dictionary with training hyperparameters.\n variables: Information about variables/features used by this model.\n Returns:\n Tuple with data and mask arrays.\n \"\"\"\n if train_config_dict is None:\n train_config_dict = {}\n if variables is None:\n variables = self.variables\n\n self.data_processor = DataProcessor(\n variables,\n unit_scale_continuous=False,\n standardize_data_mean=train_config_dict.get(\"standardize_data_mean\", False),\n standardize_data_std=train_config_dict.get(\"standardize_data_std\", False),\n )\n processed_dataset = self.data_processor.process_dataset(dataset)\n\n data, mask = processed_dataset.train_data_and_mask\n data = data.astype(np.float32)\n return data, mask\n\n def _posterior_p_sample(self, data: torch.Tensor, dataset_size:int, 
num_samples: int = 1, writer: Optional[SummaryWriter] = None, interval:int=1) -> torch.Tensor:\n \"\"\"\n SG-MCMC step for sampling p.\n Args:\n data: Tensor of shape (batch_size, num_nodes) representing the data\n num_samples: Number of samples to return.\n writer: Optional tensorboard SummaryWriter.\n dataset_size: Size of the dataset\n interval: Number of steps between logging to tensorboard\n \"\"\"\n num_steps = num_samples * interval\n if self.p_steps < self.num_burnin_steps:\n num_steps = self.num_burnin_steps - self.p_steps + num_samples\n total_loss = 0.\n for cur_step in range(num_steps):\n self.weights_opt.zero_grad()\n self.W_opt.zero_grad()\n self.p_opt.zero_grad()\n A_samples = self.transform_adj(self.p, detach_W=False)\n ll_eltwise, _, p_prior, sparse_loss = self.data_likelihood(data, A_samples, dataset_size=dataset_size, use_param_weights=True, return_prior=True)\n loss = -(ll_eltwise+p_prior+1*sparse_loss).mean() # 1 averaged over num_chains, batch sizes\n total_loss+= loss.detach()\n loss.backward()\n self.p_opt.step()\n if writer is not None:\n for jj in range(self.p.shape[0]):\n writer_dict = {}\n for j in range(self.p.shape[-1]):\n writer_dict[str(j)] = self.p[jj, j].detach().cpu().numpy()\n writer.add_scalars(f\"p_chain_{jj}\", writer_dict, self.p_steps) # tensorboard\n self.p_steps += 1\n \n if self.p_steps >= self.num_burnin_steps and (cur_step+1)%interval == 0:\n for i in range(len(self.p)):\n self.p_buffer.append(self.p[i].detach().clone())\n\n return total_loss/num_steps\n \n def _posterior_weights_sample(self, data: torch.Tensor, dataset_size:int, num_samples: int = 1) -> torch.Tensor:\n \"\"\"\n SG-MCMC step for sampling the weights.\n Args:\n data: Tensor of shape (batch_size, num_nodes) representing the data\n num_samples: Number of samples to return.\n dataset_size: Size of the dataset\n \"\"\"\n num_steps = num_samples\n if self.weights_steps < self.num_burnin_steps:\n num_steps = self.num_burnin_steps - self.weights_steps + num_samples\n\n total_loss = 0.\n for _ in range(num_steps):\n self.weights_opt.zero_grad()\n self.W_opt.zero_grad()\n self.p_opt.zero_grad()\n A_samples = self.transform_adj(self.p)\n ll_eltwise, theta_prior, _, sparse_loss = self.data_likelihood(data, A_samples, dataset_size=dataset_size, use_param_weights=True, return_prior=True)# batch x chain, num_chain\n \n loss = -(ll_eltwise+theta_prior+sparse_loss).mean() #[]\n total_loss += loss.detach()\n loss.backward()\n self.weights_opt.step()\n self.weights_steps += 1\n\n if self.weights_steps >= self.num_burnin_steps:\n for i in range(self.num_chains):\n self.weights_buffer.append(untranspose_stack(self.icgnn_params, i, clone=True))\n self.buffers_buffer.append(untranspose_stack(self.icgnn_buffers, i, clone=True))\n return total_loss/num_steps\n \n def _train_helper_network(self, data: torch.Tensor, dataset_size, num_iters: int = 1)-> torch.Tensor:\n \"\"\"\n VI step for training the helper network to generate W conditioned on p.\n Args:\n data: Tensor of shape (batch_size, num_nodes) representing the data\n num_iters: Number of iterations to train for.\n dataset_size: Size of the dataset\n \"\"\"\n total_loss = 0.\n for _ in range(num_iters):\n self.weights_opt.zero_grad()\n self.W_opt.zero_grad()\n self.p_opt.zero_grad()\n A_samples = self.transform_adj(self.p)\n ll_eltwise,_,_,sparse_loss = self.data_likelihood(data, A_samples, dataset_size=dataset_size, use_param_weights=True, return_prior=True) # batch x chain\n prior, entropy = self.compute_W_prior_entropy(self.p, 
dataset_size=dataset_size) # chain\n loss = -(ll_eltwise+prior + entropy+sparse_loss).mean() #\n total_loss += loss.detach()\n loss.backward()\n self.W_opt.step()\n return total_loss/num_iters\n\n def run_train(\n self,\n dataset: Dataset,\n train_config_dict: Optional[Dict[str, Any]] = None,\n report_progress_callback: Optional[Callable[[str, int, int], None]] = None,\n ) -> None:\n \"\"\"\n Runs training.\n Args:\n dataset: Dataset to use.\n train_config_dict: Dictionary with training hyperparameters.\n report_progress_callback: Optional callback function to report training progress.\n \"\"\"\n if train_config_dict is None:\n train_config_dict = {}\n dataloader, _ = self._create_dataset_for_bayesdag(dataset, train_config_dict)\n\n # initialise logging machinery\n train_output_dir = os.path.join(self.save_dir, \"train_output\")\n os.makedirs(train_output_dir, exist_ok=True)\n log_path = os.path.join(train_output_dir, \"summary\")\n writer = SummaryWriter(log_path, flush_secs=1)\n\n print(\"Saving logs to\", log_path, flush=True)\n tracker_loss_terms: Dict = defaultdict(list)\n \n self.dataset_size = dataset._train_data.shape[0]\n self.train_data, _ = to_tensors(*dataset.train_data_and_mask,device=self.device)\n\n best_loss = np.inf\n self.p_opt.update_dataset_size(self.dataset_size)\n self.weights_opt.update_dataset_size(self.dataset_size)\n # Outer optimization loop\n inner_opt_count = 0\n prev_best = 0\n for step in range(train_config_dict[\"max_epochs\"]):\n loss_epoch = 0.\n \n for (x, _) in dataloader:\n p_loss = self._posterior_p_sample(data=x, dataset_size=self.dataset_size, num_samples=1, writer=writer)\n \n W_loss = self._train_helper_network(data=x, dataset_size=self.dataset_size,num_iters=1)\n weights_loss = self._posterior_weights_sample(data=x, dataset_size=self.dataset_size ,num_samples=1)\n loss = (p_loss+W_loss+weights_loss)/3\n loss_epoch += loss\n tracker_loss_terms[\"loss\"].append(loss.mean().item())\n tracker_loss_terms[\"p_loss\"].append(p_loss.item())\n tracker_loss_terms[\"W_loss\"].append(W_loss.item())\n tracker_loss_terms[\"weights_loss\"].append(weights_loss.item())\n inner_opt_count+=1\n if loss_epoch.item() < best_loss:\n best_loss = loss_epoch.item()\n print(\"New best model found. 
Saving Checkpoint\")\n prev_best = 0\n self.save(best=True)\n else:\n prev_best +=1\n if step % 4 == 0:\n if (\n isinstance(dataset, CausalDataset)\n and dataset.has_adjacency_data_matrix\n and not hasattr(self, \"latent_variables\")\n ):\n \n adj_metrics = self.evaluate_metrics(dataset=dataset)\n else:\n adj_metrics = None\n self.print_tracker_sgld(step, tracker_loss_terms, adj_metrics)\n \n _log_epoch_metrics(writer=writer, tracker_loss_terms=tracker_loss_terms, adj_metrics=adj_metrics, step=step)\n\n \n def print_tracker_sgld(self, step: int, tracker: dict, adj_metrics: Optional[dict]) -> None:\n \"\"\"Prints formatted contents of loss terms that are being tracked.\n Args:\n inner_step: Current step.\n tracker: Dictionary with loss terms.\n adj_metrics: Dictionary with adjacency matrix discovery metrics.\n \"\"\"\n tracker_copy = tracker.copy()\n\n loss = np.mean(tracker_copy.pop(\"loss\")[-100:])\n if adj_metrics is not None:\n adj_metrics_cp = adj_metrics.copy()\n shd = adj_metrics_cp.pop(\"shd\")\n nnz = adj_metrics_cp.pop(\"nnz\")\n cpdag_shd = adj_metrics_cp.pop(\"cpdag-shd\", float(\"nan\"))\n nll_val = adj_metrics_cp.pop(\"nll_val\", float(\"nan\"))\n nll_train = adj_metrics_cp.pop(\"nll_train\", float(\"nan\"))\n o_fscore = adj_metrics_cp.pop(\"orientation_fscore\", float(\"nan\"))\n mmd_tp = adj_metrics_cp.pop(\"mmd-tp\", float(\"nan\"))\n else:\n shd = float(\"nan\")\n nnz = float(\"nan\")\n nll_val = float(\"nan\")\n cpdag_shd = float(\"nan\")\n nll_train = float(\"nan\")\n mmd_tp =float(\"nan\")\n o_fscore = float(\"nan\")\n\n print(\n f\"Step: {step}, loss: {loss:.2f}, shd: {shd:.2f}, o_fscore:{o_fscore:.2f}, cpdag-shd: {cpdag_shd:.2f} nnz: {nnz:.2f} NLL-Validation: {nll_val:.4f} NLL-Train: {nll_train:.4f} MMD-TP: {mmd_tp:.4f} P_grad_norm:{torch.norm(self.p.grad).item()}\", flush=True\n )\n \n \n def get_adj_matrix_tensor(\n self,\n samples: int = 5,\n ) -> torch.Tensor: \n \"\"\"\n Returns the adjacency matrix (or several) as a torch tensor.\n Args:\n samples: Number of samples to return.\n \"\"\"\n batch_size = 500\n num_steps = int(np.ceil(samples/self.num_chains))\n for _ in range(num_steps):\n indices = torch.randperm(self.train_data.shape[0])[:batch_size]\n input_data = self.train_data[indices]\n self._posterior_p_sample(data=input_data, num_samples=1, dataset_size=self.dataset_size, interval=1)\n self._posterior_weights_sample(data=input_data,dataset_size=self.dataset_size, num_samples=1)\n\n p_vec= []\n for _ in range(samples):\n p_vec.append(self.p_buffer.pop())\n p_eval = torch.stack(p_vec)\n adj_matrix = self.transform_adj(p_eval) != 0.0\n return adj_matrix, torch.ones(samples)\n\n def get_adj_matrix(\n self,\n samples: int = 100,\n squeeze: bool = False,\n ) -> np.ndarray:\n \"\"\"\n Returns the adjacency matrix (or several) as a numpy array.\n Args:\n samples: Number of samples to return.\n squeeze: Whether to squeeze the first dimension if samples == 1.\n \"\"\"\n adj_matrix, is_dag = self.get_adj_matrix_tensor(samples=samples)\n if squeeze and samples == 1:\n adj_matrix = adj_matrix.squeeze(0)\n return adj_matrix.detach().cpu().numpy().astype(np.float64), is_dag.detach().cpu().numpy().astype(bool)" } ]
import torch
from ...datasets.variables import Variables
from .bayesdag_nonlinear import BayesDAGNonLinear
13,360
from __future__ import annotations

class BayesDAGLinear(BayesDAGNonLinear):
    """
    Approximate Bayesian inference over the graph in a Gaussian linear ANM based on the BayesDAG result. Any DAG G is represented as G = W * Step(grad (p))
    where W is a discrete matrix W in {0, 1} ^ {d, d} and p in R^d. Inference over DAGs G then corresponds to inference over W and p
    as the transformation is determinstic. This can be converted to inference over W and p by using the Gumbel-Sinkhorn trick.
    """
    def __init__(
        self,
        model_id: str,
from __future__ import annotations

class BayesDAGLinear(BayesDAGNonLinear):
    """
    Approximate Bayesian inference over the graph in a Gaussian linear ANM based on the BayesDAG result. Any DAG G is represented as G = W * Step(grad (p))
    where W is a discrete matrix W in {0, 1} ^ {d, d} and p in R^d. Inference over DAGs G then corresponds to inference over W and p
    as the transformation is determinstic. This can be converted to inference over W and p by using the Gumbel-Sinkhorn trick.
    """
    def __init__(
        self,
        model_id: str,
variables: Variables,
0
2023-11-21 12:55:08+00:00
16k
ChenyangGao/python-epub3
epub3/epub.py
[ { "identifier": "File", "path": "epub3/util/file.py", "snippet": "class File:\n __slots__ = (\"path\", \"fs\", \"open\", \"open_modes\", \"_getattr\")\n ALL_MODES = frozenset(\"rwxab+\")\n\n def __init__(\n self, \n /, \n path=None, \n fs=None, \n open_modes=None, \n ):\n super().__setattr__(\"path\", path)\n super().__setattr__(\"fs\", fs)\n self._init_open(path, fs, open_modes)\n\n def __init_subclass__(cls, /, **kwargs):\n raise TypeError(\"subclassing is not allowed\")\n\n def __repr__(self, /) -> str:\n cls = type(self)\n module = cls.__module__\n name = cls.__qualname__\n if module != \"__main__\":\n name = module + \".\" + name\n return \"%s(%s)\" % (name, \", \".join(\"%s=%r\" % (k, getattr(self, k)) for k in cls.__slots__))\n\n def __delattr__(self, attr):\n raise TypeError(\"can't delete any attributes\")\n\n def __getattr__(self, attr, /):\n try:\n return self._getattr(attr)\n except Exception as e:\n raise AttributeError(attr) from e\n\n def __setattr__(self, attr, value, /):\n raise TypeError(\"can't set any attributes\")\n\n def _init_open(self, path, fs, open_modes, /):\n cls = type(self)\n code, file_open = cls._get_open(fs)\n use_io_open = file_open is io.open\n if file_open is None:\n if isinstance(path, Path):\n file_open = path.open\n use_io_open = True\n code = 0\n else:\n code, file_open = cls._get_open(path)\n if file_open is None:\n if not isinstance(path, (bytes, str, PathLike)):\n raise TypeError(\"unable to determine how to open the file\")\n file_open = partial(io.open, path)\n use_io_open = True\n if code < 0:\n code = 0\n use_fs = False\n else:\n file_open = partial(file_open, path)\n use_fs = True\n if code == 0:\n def _getattr0(attr):\n try:\n return getattr(os, attr)\n except AttributeError:\n try:\n return getattr(ospath, attr)\n except AttributeError:\n return getattr(shutil, attr)\n elif code == 1:\n _getattr0 = partial(getattr, fs if use_fs else path)\n elif code == 2:\n _getattr0 = (fs if use_fs else path).__getitem__\n if use_fs:\n def _getattr(attr, default=undefined, /):\n try:\n val = _getattr0(attr)\n except (LookupError, AttributeError):\n if default is undefined:\n raise\n return default\n if not callable(val):\n return val\n if isclass(val) or isinstance(val, staticmethod):\n return val\n return partial(val, path)\n else:\n def _getattr(attr, default=undefined, /):\n try:\n return _getattr0(attr)\n except (LookupError, AttributeError):\n if default is undefined:\n raise\n return default\n default_open_modes = _getattr(\"open_modes\", None)\n if default_open_modes is not None:\n open_modes = default_open_modes\n super().__setattr__(\"_getattr\", _getattr)\n open_keywords = cls._open_keywords(file_open)\n if \"mode\" not in open_keywords or open_modes == \"\":\n open_modes = frozenset()\n elif open_modes is None:\n open_modes = type(self).ALL_MODES\n elif use_io_open:\n open_modes = frozenset(open_modes) & type(self).ALL_MODES | frozenset(\"rb\")\n else:\n open_modes = frozenset(open_modes) & type(self).ALL_MODES | frozenset(\"r\")\n super().__setattr__(\"open_modes\", open_modes)\n amode = frozenset(\"rwxa+\")\n def open(\n mode=\"r\", \n buffering=-1, \n encoding=None, \n errors=None, \n newline=None, \n **kwargs, \n ):\n if mode not in OPEN_MODES:\n raise ValueError(f\"invalid open mode: {mode!r}\")\n binary_mode = \"b\" in mode\n if mode == \"r\":\n pass\n elif not open_modes:\n if \"r\" not in mode or \"+\" in mode:\n raise ValueError(f\"open mode unsupported: {mode!r}\")\n mode = \"r\"\n else:\n if open_modes:\n if amode & set(mode) - 
open_modes:\n raise ValueError(f\"open mode unsupported: {mode!r}\")\n mode = next(m for m in \"rwax\" if m in mode) + \"+\"[:\"+\" in mode]\n if open_modes:\n if \"b\" in open_modes:\n mode += \"b\"\n if open_keywords is not CONTAINS_ALL:\n kwargs = {k: v for k, v in kwargs.items() if k in open_keywords}\n if open_modes:\n kwargs[\"mode\"] = mode\n if \"buffering\" in open_keywords:\n kwargs[\"buffering\"] = buffering\n file = file_open(**kwargs)\n else:\n file = file_open(**kwargs)\n if binary_mode and buffering == 0:\n return file\n bufsize = buffering if buffering > 1 else DEFAULT_BUFFER_SIZE\n if \"+\" in mode:\n file = BufferedRandom(file, bufsize)\n elif \"r\" in mode:\n file = BufferedReader(file, bufsize)\n else:\n file = BufferedWriter(file, bufsize)\n if binary_mode:\n return file\n return TextIOWrapper(\n file, \n encoding=encoding, \n errors=errors, \n newline=newline, \n line_buffering=buffering==1, \n )\n super().__setattr__(\"open\", open)\n\n @staticmethod\n def _get_open(f, /):\n if f is None:\n return 0, None\n if callable(open := getattr(f, \"open\", None)):\n return 1, open\n try:\n if callable(open := f[\"open\"]):\n return 2, open\n except (TypeError, LookupError):\n if callable(f):\n return 3, f\n return -1, None\n\n @staticmethod\n def _open_keywords(open, /):\n params = signature(open).parameters\n if params:\n names = []\n for name, param in reversed(params.items()):\n if param.kind not in (POSITIONAL_OR_KEYWORD, KEYWORD_ONLY):\n break\n names.append(name)\n if param.kind is VAR_KEYWORD:\n return CONTAINS_ALL\n return frozenset(names)\n return frozenset()\n\n def check_open_mode(self, mode=\"r\", /):\n if mode not in OPEN_MODES:\n return False\n if mode == \"r\":\n return True\n open_modes = self.open_modes\n if not open_modes:\n if \"r\" not in mode or \"+\" in mode:\n return False\n else:\n if open_modes and frozenset(\"rwxa+\") & set(mode) - open_modes:\n return False\n return True" }, { "identifier": "RootFS", "path": "epub3/util/file.py", "snippet": "class RootFS:\n\n def __init__(self, root=None, /, joinpath=None):\n none_root = root is None\n if not none_root and callable(open := getattr(root, \"open\", None)):\n _getattr = partial(getattr, root)\n elif not none_root and callable(open := root[\"open\"]):\n _getattr = root.__getitem__\n elif none_root or isinstance(root, (bytes, str, PathLike)):\n self._fs = None\n if root is None:\n self._root = os.getcwd()\n else:\n self._root = ospath.realpath(root)\n if ospath.isfile(root):\n raise NotADirectoryError(errno.ENOTDIR, root)\n self._joinpath = ospath.join\n self._open = io.open\n return\n if joinpath is None:\n joinpath = get_any_callable(_getattr, \"joinpath\", \"join\") or posixpath.join\n self._fs = root\n self._root = \"\"\n self._getattr = _getattr\n self._joinpath = joinpath\n self._open = open\n\n def __repr__(self, /):\n return f\"<{type(self).__qualname__}({self._root!r}) at {hex(id(self))}>\"\n\n def _getattr(self, attr, /):\n try:\n return getattr(os, attr)\n except AttributeError:\n try:\n return getattr(ospath, attr)\n except AttributeError:\n return getattr(shutil, attr)\n\n def __getattr__(self, attr, /):\n try:\n val = self._getattr(attr)\n except (AttributeError, LookupError) as e:\n raise AttributeError(attr) from e\n if not callable(val):\n return val\n if isclass(val) or isinstance(val, staticmethod):\n return val\n def wrapper(name, /, *args, **kwargs):\n return val(self.joinpath(name), *args, **kwargs)\n return update_wrapper(wrapper, val)\n\n @property\n def name(self, /):\n return 
self._root\n\n @property\n def root(self, /):\n return self._root\n\n def joinpath(self, /, *paths):\n return self._joinpath(self._root, *paths)\n\n def open(\n self, \n name, \n /, \n mode='r', \n buffering=-1, \n encoding=None, \n errors=None, \n newline=None, \n ):\n return self._open(\n self.joinpath(name), \n mode=mode, \n buffering=buffering, \n encoding=encoding, \n errors=errors, \n newline=newline, \n )" }, { "identifier": "TemporaryFS", "path": "epub3/util/file.py", "snippet": "class TemporaryFS(RootFS):\n\n def __init__(self, root=None, /, joinpath=None):\n none_root = root is None\n if not none_root and callable(open := getattr(root, \"open\", None)):\n _getattr = partial(getattr, root)\n elif not none_root and callable(open := root[\"open\"]):\n _getattr = root.__getitem__\n elif none_root or isinstance(root, (bytes, str, PathLike)):\n self._fs = None\n temdir = TemporaryDirectory(dir=root)\n self._root = temdir.name\n self._joinpath = ospath.join\n self._open = io.open\n self._cleanup = temdir.cleanup\n return\n else:\n raise TypeError(f\"can't get `open` method from: {fs!r}\")\n if joinpath is None:\n joinpath = get_any_callable(_getattr, \"joinpath\", \"join\") or posixpath.join\n self._fs = root\n self._root = root = \"\"\n self._getattr = _getattr\n self._joinpath = joinpath\n self.open = open\n remove = get_any_callable(_getattr, \"remove\", \"rm\")\n if remove is None:\n warn(f\"can't get `remove` and `rm` methods from: {fs!r}\")\n self.remove = lambda *args, **kwargs: None\n self._cleanup = lambda: None\n return\n self.remove = remove\n mkdir = get_any_callable(_getattr, \"mkdir\", \"makedir\")\n if mkdir is not None:\n name = str(uuid4())\n try:\n mkdir(name)\n except:\n warn(f\"can't make temporary directory: {name!r} on {fs!r}\")\n else:\n self._root = root = name\n if root:\n rmtree = get_any_callable(_getattr, \"rmtree\", \"removetree\")\n if rmtree is not None:\n def _open(path, *args, **kwargs):\n return open(joinpath(root, path), *args, **kwargs)\n self.open = update_wrapper(_open, open)\n def _remove(path):\n remove(joinpath(root, path))\n self.remove = update_wrapper(_remove, remove)\n self._cleanup = lambda: rmtree(root)\n return\n created = set()\n def _open(path, mode=\"r\", **kwargs):\n path = joinpath(root, path)\n file = open(path, mode=mode, **kwargs)\n if \"r\" not in mode:\n created.add(path)\n return file\n self.open = update_wrapper(_open, open)\n def _remove(path):\n path = joinpath(root, path)\n remove(path)\n created.discard(path)\n self.remove = update_wrapper(_remove, remove)\n rmdir = get_any_callable(_getattr, \"rmdir\", \"removedir\")\n def _cleanup():\n for path in tuple(created):\n try:\n remove(path)\n except:\n pass\n if root and rmdir is not None:\n try:\n rmdir(root)\n except:\n pass\n self._cleanup = _cleanup\n\n def __repr__(self, /):\n return f\"<{type(self).__qualname__}({self._fs!r}) {self._root!r} at {hex(id(self))}>\"\n\n def __del__(self, /):\n self.cleanup()\n\n def __enter__(self, /):\n return self\n\n def __exit__(self, exc, value, tb, /):\n self.cleanup()\n\n def cleanup(self, /):\n try:\n self._cleanup()\n except:\n pass" }, { "identifier": "OPEN_MODES", "path": "epub3/util/file.py", "snippet": "OPEN_MODES = frozenset(\n \"\".join(t1) \n for t0 in product(\"rwax\", (\"\", \"b\", \"t\"), (\"\", \"+\")) \n for t1 in permutations(t0, 3)\n)" }, { "identifier": "guess_media_type", "path": "epub3/util/helper.py", "snippet": "def guess_media_type(name: str, /, default: str = \"application/octet-stream\") -> str:\n return 
guess_type(name)[0] or default" }, { "identifier": "values", "path": "epub3/util/helper.py", "snippet": "def values(m, /):\n if isinstance(m, Mapping):\n try:\n return m.values()\n except Exception:\n return ValuesView(m)\n return m" }, { "identifier": "items", "path": "epub3/util/helper.py", "snippet": "def items(m, /):\n if isinstance(m, Mapping):\n try:\n return m.items()\n except Exception:\n return ItemsView(m)\n return m" }, { "identifier": "sup", "path": "epub3/util/helper.py", "snippet": "def sup(exists, x=1):\n \"\"\"Find the smallest available integer greater than or equal to `x`.\n\n :param exists: Determine if the value exists (unavailable), return True if it does.\n :param x: Start value.\n\n :return: The smallest integer greater than or equal to the initial value \n x for which calling exists returns False.\n \"\"\"\n δ = 1\n while exists(x):\n x += δ\n δ <<= 1\n if δ <= 2:\n return x\n δ >>= 2\n x -= δ\n while δ > 1:\n δ >>= 1\n if exists(x):\n x += δ\n else:\n x -= δ\n return x + exists(x)" }, { "identifier": "proxy_property", "path": "epub3/util/proxy.py", "snippet": "@overload\ndef proxy_property(fget: None, /, key: Optional[str] = \"\") -> Callable[[Callable], property]: ..." }, { "identifier": "ElementAttribProxy", "path": "epub3/util/proxy.py", "snippet": "class ElementAttribProxy(metaclass=CachedMeta):\n __const_keys__: tuple[str, ...] = ()\n __protected_keys__: tuple[str, ...] = ()\n __cache_check_key__ = lambda obj: isinstance(obj, Element)\n __cache_cls__ = WeakKeyDictionary if USE_BUILTIN_XML else WeakValueDictionary\n __wrap_class__: \"type[ElementAttribProxy]\"\n\n def __init__(self, root, /):\n self._root = root\n self._attrib = root.attrib\n if USE_BUILTIN_XML:\n self._nsmap = nsmap = {}\n else:\n self._nsmap = nsmap = root.nsmap\n if self.__const_keys__:\n self.__const_keys__ = frozenset(\n resolve_prefix(key, nsmap, NAMESPACES) for key in type(self).__const_keys__\n )\n if self.__protected_keys__:\n self.__protected_keys__ = frozenset(\n resolve_prefix(key, nsmap, NAMESPACES) for key in type(self).__protected_keys__\n )\n\n def __init_subclass__(\n cls, \n /, \n get_key=None, \n check_key=None, \n get_state=None, \n set_state=None, \n **kwargs, \n ):\n if callable(get_key):\n self.__cache_get_key__ = get_key\n if isclass(check_key) and issubclass(check_key, object) or type(check_key) is tuple:\n self.__cache_check_key__ = lambda obj, _t: isinstance(obj, _t)\n elif type(check_key) in (set, frozenset):\n self.__cache_check_key__ = check_key.__contains__\n elif callable(check_key):\n self.__cache_check_key__ = check_key\n if callable(get_state):\n self.__cache_get_state__ = get_state\n if callable(set_state):\n self.__cache_set_state__ = set_state\n namespaces = cls.__dict__\n const_keys = namespaces.get(\"__const_keys__\")\n if const_keys:\n for key in const_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key))\n protected_keys = namespaces.get(\"__protected_keys__\")\n if protected_keys:\n for key in protected_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key, setable=True))\n optional_keys = namespaces.get(\"__optional_keys__\")\n if optional_keys:\n for key in optional_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key, setable=True, delable=True))\n if \"__wrap_class__\" not in namespaces:\n for base_cls in cls.__mro__:\n if \"__wrap_class__\" in 
base_cls.__dict__:\n cls.__wrap_class__ = base_cls.__wrap_class__\n break\n elif cls.__dict__.get(\"__is_wrap_class__\"):\n cls.__wrap_class__ = base_cls\n break\n\n def __contains__(self, key, /):\n if not isinstance(key, str) or not key:\n return False\n return resolve_prefix(key, self._nsmap, NAMESPACES) in self._attrib\n\n def __delitem__(self, key, /):\n if isinstance(key, (int, slice)):\n del self._root[key]\n elif isinstance(key, str):\n if not key:\n raise ValueError(\"empty key not allowed\")\n if key in self.__const_keys__ or key in self.__protected_keys__:\n raise LookupError(f\"not allowed to delete key: {key}\")\n del self._attrib[key]\n else:\n raise TypeError(\"only accept `key` type: int, slice and str\")\n return self\n\n def __eq__(self, other, /):\n if type(self) is not type(other):\n return NotImplemented\n return self._root is other._root\n\n def __getitem__(self, key, /):\n if isinstance(key, str):\n if not key:\n raise ValueError(\"empty key not allowed\")\n return self._attrib[resolve_prefix(key, self._nsmap, NAMESPACES)]\n elif isinstance(key, (int, slice)):\n if isinstance(key, int):\n return type(self).wrap(self._root[key])\n return list(map(type(self).wrap, self._root[key]))\n else:\n raise TypeError(\"only accept `key` type: int, slice and str\")\n\n def __hash__(self, /):\n return hash(self._root)\n\n @PyLinq.streamify\n def __iter__(self, /):\n return iter(self._attrib)\n\n def __len__(self, /):\n return len(self._attrib)\n\n def __setitem__(self, key, value, /):\n if not isinstance(key, str):\n raise TypeError(\"only accept `key` type: `str`\")\n if not key:\n raise ValueError(\"empty key not allowed\")\n if value is None:\n self.pop(key, None)\n else:\n if key in self.__const_keys__:\n raise LookupError(f\"not allowed to set key: {key!r}\")\n self._attrib[key] = str(value)\n return self\n\n def __repr__(self, /):\n attrib = self._attrib\n attrib = f\", {attrib=!r}\" if attrib else \"\"\n return f\"<{type(self).__qualname__}(<{self._root.tag}>{attrib}) at {hex(id(self))}>\"\n\n @classmethod\n def wrap(cls, root, /):\n wrap_class_map = cls.__dict__.get(\"__wrap_class_map__\")\n if not wrap_class_map:\n return cls.__wrap_class__(root)\n for pred, wrap_class in wrap_class_map.items():\n if isinstance(pred, str):\n if pred.startswith(\"{*}\"):\n if pred[3:] == root.tag or root.tag.endswith(pred[2:]):\n return wrap_class(root)\n elif pred.startswith(\"{}\"):\n if pred[2:] == root.tag:\n return wrap_class(root)\n elif pred.endswith(\":*\"):\n if root.tag.startswith(pred[:-1]) or root.tag.startswith(resolve_prefix(pred[:-1], NAMESPACES)):\n return wrap_class(root)\n elif root.tag == pred or root.tag == resolve_prefix(pred, NAMESPACES):\n return wrap_class(root)\n elif isinstance(pred, Pattern):\n if pred.search(root.tag) is not None:\n return wrap_class(root)\n elif isinstance(pred, Container):\n if root.tag in pred:\n return wrap_class(root)\n elif callable(pred):\n if pred(root):\n return wrap_class(root)\n return cls.__wrap_class__(root)\n\n def getproxy(self, key, /):\n if not key:\n return\n key = resolve_prefix(key, self._nsmap, NAMESPACES)\n namespaces = type(self).__dict__\n const_keys = namespaces.get(\"__const_keys__\")\n protected_keys = namespaces.get(\"__protected_keys__\")\n setable = not (const_keys and key in const_keys)\n delable = setable and not (protected_keys and key in protected_keys)\n return auto_property(key, setable=setable, delable=delable).fget(self)\n\n @cached_property\n def attrib(self, /):\n return AttrInfoProxy(self)\n\n @property\n 
def nsmap(self, /):\n return self._nsmap\n\n @cached_property\n def info(self, /):\n return MappingProxyType({\"attrib\": self.attrib})\n\n @property\n def proxy(self, /):\n return self\n\n @PyLinq.streamify\n def iter(self, /):\n return map(type(self).wrap, self._root.iterfind(\"*\"))\n\n def list(self, /, mapfn=None):\n if mapfn is None:\n return list(self.iter())\n return list(map(mapfn, self.iter()))\n\n def keys(self, /):\n return self._attrib.keys()\n\n def values(self, /):\n return self._attrib.values()\n\n def items(self, /):\n return self._attrib.items()\n\n def clear(self, /):\n const_keys = self.__const_keys__\n protected_keys = self.__protected_keys__\n attrib = self._attrib\n if const_keys or protected_keys:\n for key in tuple(attrib):\n if key in const_keys or key in protected_keys:\n continue\n del attrib[key]\n else:\n attrib.clear()\n return self\n\n def get(self, key, /, default=None):\n try:\n return self._attrib[key]\n except LookupError:\n return default\n\n def pop(self, key, /, default=undefined):\n if key in self.__const_keys__ or key in self.__protected_keys__:\n raise LookupError(f\"not allowed to delete key: {key}\") \n try:\n r = self._attrib[key]\n except LookupError:\n if default is undefined:\n raise\n return default\n else:\n del self._attrib[key]\n return r\n\n def popitem(self, /):\n const_keys = self.__const_keys__\n protected_keys = self.__protected_keys__\n for key, val in reversed(self._attrib.items()):\n if not (key in const_keys or key in protected_keys):\n del self._attrib[key]\n return (key, val)\n raise LookupError(\"no items to pop\")\n\n def setdefault(self, key, /, default=\"\"):\n if not isinstance(key, str):\n raise TypeError(\"only accept `key` type: str\")\n try:\n return seself._attriblf[key]\n except LookupError:\n self._attrib[key] = default\n return default\n\n def sort(self, key=id, reverse=False, use_backend_element=False):\n if use_backend_element:\n self._root[:] = sorted(self._root, key=key, reverse=reverse)\n else:\n self._root[:] = (e._root for e in sorted(self.iter(), key=key, reverse=reverse))\n return self\n\n def merge(self, attrib=None, /, **attrs):\n if attrib:\n if attrs:\n attrib = dict(attrib, **attrs)\n else:\n attrib = attrs\n if attrib:\n el_set(self._root, attrib=attrib, namespaces=NAMESPACES, merge=True)\n return self\n\n def update(self, attrib=None, /, **attrs):\n const_keys = self.__const_keys__\n if attrib:\n if attrs:\n attrib = dict(attrib, **attrs)\n elif const_keys and (not isinstance(attrib, Mapping) or any(key in attrib for key in const_keys)):\n attrib = dict(attrib)\n else:\n const_keys = ()\n else:\n attrib = attrs\n if const_keys:\n for key in const_keys:\n attrib.pop(key, None)\n if attrib:\n el_set(self._root, attrib=attrib, namespaces=NAMESPACES, merge=False)\n return self" }, { "identifier": "ElementProxy", "path": "epub3/util/proxy.py", "snippet": "class ElementProxy(ElementAttribProxy):\n __is_wrap_class__ = True\n\n def __repr__(self, /):\n attrib = self._attrib\n attrib = f\", {attrib=!r}\" if attrib else \"\"\n text = self.text\n text = f\", {text=!r}\" if text and text.strip() else \"\"\n tail = self.tail\n tail = f\", {tail=!r}\" if tail and tail.strip() else \"\"\n return f\"<{type(self).__qualname__}(<{self._root.tag}>{attrib}{text}{tail}) at {hex(id(self))}>\"\n\n def getproxy(self, key=\"\", /):\n if not key:\n return auto_property(key, setable=True, delable=True).fget(self)\n return super().getproxy(key)\n\n @property\n def length(self, /):\n return len(self._root)\n\n @property\n def 
tag(self, /):\n return self._root.tag\n\n @property\n def text(self, /):\n return self._root.text\n\n @text.setter\n def text(self, text, /):\n self._root.text = None if text is None else str(text)\n\n @property\n def tail(self, /):\n return self._root.tail\n\n @tail.setter\n def tail(self, text, /):\n self._root.tail = None if text is None else str(text)\n\n @cached_property\n def info(self, /):\n return ElementInfoProxy(self)\n\n def clear(self, /):\n self._root.clear()\n return self\n\n def merge(self, attrib=None, /, text=None, tail=None, **attrs):\n super().merge(attrib, **attrs)\n el_set(self._root, text=text, tail=tail, namespaces=NAMESPACES, merge=True)\n return self\n\n def update(self, attrib=None, /, text=None, tail=None, **attrs):\n super().update(attrib, **attrs)\n el_set(self._root, text=text, tail=tail, namespaces=NAMESPACES, merge=False)\n return self\n\n def add(self, name, /, attrib=None, text=None, tail=None):\n return type(self).wrap(el_add(self._root, name=name, attrib=attrib, text=text, tail=tail, namespaces=NAMESPACES))\n\n def delete(self, path, /):\n if isinstance(path, ElementAttribProxy):\n try:\n self._root.remove(path._root)\n except:\n pass\n else:\n el_del(self._root, path, namespaces=NAMESPACES)\n return self\n\n def find(self, path, /):\n return next(self.iterfind(path), None)\n\n @PyLinq.streamify\n def iterfind(self, path, /):\n return map(type(self).wrap, el_iterfind(self._root, path, NAMESPACES))\n\n def set(\n self, \n path=None, \n /, \n name=None, \n attrib=None, \n text=None, \n tail=None, \n merge=False, \n ):\n el = el_set(\n self._root, \n path, \n name=name, \n attrib=attrib, \n text=text, \n tail=tail, \n namespaces=NAMESPACES, \n merge=merge, \n )\n if el is not None:\n return type(self).wrap(el)\n\n def setfind(\n self, \n name, \n /, \n find_attrib=None, \n attrib=None, \n text=None, \n tail=None, \n merge=False, \n delete=False, \n auto_add=False, \n ):\n el = el_setfind(\n self._root, \n name=name, \n find_attrib=find_attrib, \n attrib=attrib, \n text=text, \n tail=tail, \n namespaces=NAMESPACES, \n merge=merge, \n delete=delete, \n auto_add=auto_add, \n )\n if el is not None:\n return type(self).wrap(el)" }, { "identifier": "NAMESPACES", "path": "epub3/util/proxy.py", "snippet": "NAMESPACES: Final = {\n \"containerns\": \"urn:oasis:names:tc:opendocument:xmlns:container\", \n \"daisy\": \"http://www.daisy.org/z3986/2005/ncx/\", \n \"dc\": \"http://purl.org/dc/elements/1.1/\", \n \"ds\": \"http://www.w3.org/2000/09/xmldsig#\", \n \"epub\": \"http://www.idpf.org/2007/ops\", \n \"enc\": \"http://www.w3.org/2001/04/xmlenc#\",\n \"ncx\": \"http://www.daisy.org/z3986/2005/ncx/\", \n \"ns\": \"http://www.idpf.org/2016/encryption#compression\", \n \"opf\": \"http://www.idpf.org/2007/opf\", \n \"rdf\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\", \n \"smil\": \"http://www.w3.org/ns/SMIL\", \n \"svg\": \"http://www.w3.org/2000/svg\", \n \"html\": \"http://www.w3.org/1999/xhtml\", \n \"wsdl\": \"http://schemas.xmlsoap.org/wsdl/\", \n \"xhtml\": \"http://www.w3.org/1999/xhtml\", \n \"xlink\": \"http://www.w3.org/1999/xlink\", \n \"xml\": \"http://www.w3.org/XML/1998/namespace\", \n \"xs\": \"http://www.w3.org/2001/XMLSchema\", \n \"xsi\": \"http://www.w3.org/2001/XMLSchema-instance\", \n}" }, { "identifier": "remap_links", "path": "epub3/util/remap.py", "snippet": "def remap_links(\n manifest, \n pathmap, \n encoding=\"utf-8\", \n link_patterns=LINK_PATTERNS, \n):\n changed = []\n for predicate, patterns in link_patterns:\n for item in 
manifest.filter_by_attr(predicate):\n try:\n text = item.read_text(encoding=encoding)\n href = unquote(item[\"href\"])\n basedir = dirname(href)\n if type(patterns) is list:\n ls = []\n for subpats in patterns:\n repls = list(path_repl_iter(chain_finditer(text, subpats), pathmap, basedir))\n if repls:\n ls.append(repls)\n if not ls:\n repls = None\n elif len(ls) > 1:\n repls = sorted(chain.from_iterable(ls))\n else:\n repls = ls[0]\n else:\n repls = list(path_repl_iter(chain_finditer(text, patterns), pathmap, basedir))\n if repls:\n text = \"\".join(apply_repl_iter(text, repls))\n item.write_text(text, encoding=encoding)\n changed.append(href)\n except:\n pass\n return changed" }, { "identifier": "PyLinq", "path": "epub3/util/stream.py", "snippet": "class PyLinq(Stream, AggregateMixin, ItertoolsMixin):\n\n def __init__(self, iterable=None):\n if iterable is None:\n iterable = []\n super().__init__(iterable)\n\n def iter(self):\n return self @ iter(self.iterable)\n\n def reversed(self):\n return self @ reversed(self.iterable)\n\n def length(self):\n return self @ len(self.iterable)\n\n def add(self, element):\n return self.chain((element,))\n\n def all_equal(self):\n \"Returns True if all the elements are equal to each other\"\n g = iter(self.groupby())\n return next(g, True) and not next(g, False)\n\n def contains(self, element, key=None):\n return element in self.map(key)\n\n def difference(self, other, key=None, left_key=None, right_key=None):\n other = (self @ other).map(key or right_key)\n selectors = self.map(key or left_key).notin(other)\n return self.compress(selectors)\n\n @typed_method\n def distinct(self, key=None):\n # A simpler but not equivalent implementation as following:\n # return self @ self.group_by(key).each.first()\n hashable, unhashable = set(), []\n for i, k in self.pair(key):\n if k not in hashable and k not in unhashable:\n try:\n hashable.add(k)\n except TypeError:\n unhashable.append(k)\n yield i\n\n def element_at(self, n, default=undefined):\n try:\n return self[n]\n except TypeError as exc:\n if type(n) is int:\n if n >= 0:\n r = tuple(self.islice(n, n+1))\n if r:\n return r[0]\n else:\n r = deque(self, -n)\n if len(r) == -n:\n return r[0]\n if default is not undefined:\n return default\n raise LookupError(f'No element found at {n!r}') from exc\n\n def first(self, default=undefined):\n # self.element_at(0, default)\n if default is undefined:\n try:\n return next(iter(self))\n except StopIteration as exc:\n raise LookupError('No such first element') from exc\n return next(iter(self), default)\n\n def first_true(self, default=None, predicate=None):\n \"\"\"Returns the first true value in the iterable.\n\n If no true value is found, returns *default*\n\n If *predicate* is not None, returns the first item\n for which predicate(item) is true.\n\n \"\"\"\n return next(iter(self.filter(predicate)), default)\n\n @typed_method\n def flatten(list_of_lists):\n \"Flatten one level of nesting\"\n return itertools.chain.from_iterable(self.iterable)\n\n def group_by(self, key=None):\n groupers = self.orderby(key=key).groupby(key=key)\n return groupers.map(lambda args: Grouper.make_grouper(*args))\n\n @typed_method\n def group_join(self, other, key=None, left_key=None, right_key=None):\n left_key, right_key = key or left_key, key or right_key\n left = {i.key: tuple(i) for i in self.group_by(left_key)}\n right = {i.key: tuple(i) for i in (self @ other).group_by(right_key)}\n for k in sorted(left.keys() | right.keys()):\n grouper = itertools.product(left.get(k, ()), 
right.get(k, ()))\n yield Grouper.make_grouper(k, grouper)\n\n def intersection(self, other, key=None, left_key=None, right_key=None):\n return self.join(other, key, left_key, right_key).map(lambda x: x[0])\n\n def isin(self, other):\n if isinstance(other, Stream):\n other = other.data\n if not isinstance(other, (Set, Mapping)):\n if not isinstance(other, Sequence):\n other = tuple(other)\n try:\n other = set(other)\n except TypeError:\n pass\n return self.map(lambda x: x in other)\n\n def join(self, other, key=None, left_key=None, right_key=None):\n left_key = key or left_key or identity_function\n right_key = key or right_key or identity_function\n judge = lambda x: left_key(x[0]) == right_key(x[1])\n return self.product(other).filter(judge)\n\n def last(self, default=undefined):\n # self.element_at(-1, default)\n value = default\n for value in self: pass\n if value is undefined:\n raise LookupError('No such last element')\n return value\n\n @typed_method\n def ncycles(self, n):\n \"Returns the sequence elements n times\"\n return itertools.chain.from_iterable(itertools.repeat(tuple(self.iterable), n))\n\n def nth(self, n, default=undefined):\n \"Returns the nth item or a default value\"\n if isinstance(self.iterable, Sequence):\n try:\n return self.iterable[n]\n except LookupError:\n if default is undefined:\n raise\n return default\n try:\n return next(iter(self.islice(n, None)))\n except StopIteration as e:\n if default is undefined:\n raise LookupError(n) from e\n return default\n\n @typed_method\n def prepend(self, *values):\n \"Prepend a single value in front of an iterator\"\n return itertools.chain(values, self.iterable)\n\n def take(self, n):\n return self.islice(n)\n\n def notin(self, other):\n return self.isin(other).map(lambda x: not x)\n\n def orderby(self, key=None, reverse=False):\n return self.collect(sorted, key=key, reverse=reverse)\n\n def order_by(self, kwargs_orders=None, reverse_orders=False):\n data = list(self)\n if kwargs_orders:\n if reverse_orders:\n kwargs_orders = reversed(kwargs_orders)\n for kwargs in kwargs_orders:\n data.sort(**kwargs)\n return self @ data\n\n @typed_method\n def pair(self, key=None):\n if key is None:\n for i in self:\n yield i, i\n else:\n for i in self:\n yield i, key(i)\n\n def select(self, selector=None):\n return self.map(selector)\n\n def select_many(self, selector=None):\n return self.map(selector).chain_self_iterable()\n\n def single(self, default=undefined):\n n = 0\n for n, v in zip(range(1, 3), self): pass\n if n == 0:\n if default is not undefined:\n return default\n raise LookupError('No elements exception occured')\n elif n == 2:\n raise LookupError('More than one element exception occured')\n return v\n\n def skip(self, n):\n return self.islice(n, None)\n\n def skipwhile(self, predicate):\n return self.dropwhile(predicate)\n\n def tail(self, n):\n return self.collect(deque, n)\n\n def where(self, predicate=None):\n return self.filter(predicate)\n\n def zip(self, *iterables):\n return zip(self, *iterables)" }, { "identifier": "el_add", "path": "epub3/util/xml.py", "snippet": "def el_add(\n el: Element, \n /, \n name: str, \n attrib: Optional[Mapping] = None, \n text=None, \n tail=None, \n namespaces: Optional[Mapping] = None, \n) -> Element:\n \"\"\"\n \"\"\"\n name = extract_name(name)\n if not name:\n raise ValueError(\"unable to determine name\")\n try:\n nsmap = el.nsmap # type: ignore\n except:\n nsmap = {}\n if attrib:\n attrib0 = items(attrib)\n attrib = {}\n for key, val in attrib0:\n if key is None:\n attrib[key] = 
val\n elif isinstance(key, str):\n if key == \"xmlns\":\n if val:\n nsmap[None] = val\n else:\n nsmap.pop(None, None)\n elif key.startswith(\"xmlns:\"):\n if val:\n nsmap[key[6:]] = val\n else:\n nsmap.pop(key[6:], None)\n else:\n attrib[key] = val\n name = resolve_prefix(name, nsmap, namespaces, inherit=True)\n if USE_BUILTIN_XML:\n sel = el.makeelement(name, cast(dict[str, str], {}))\n else:\n sel = el.makeelement(name, nsmap=cast(dict[str, str], nsmap))\n el.append(sel)\n _el_set(sel, attrib, text, tail, nsmap, namespaces)\n return sel" }, { "identifier": "el_del", "path": "epub3/util/xml.py", "snippet": "def el_del(\n el: Element, \n path: Optional[str] = None, \n /, \n namespaces: Optional[Mapping] = None, \n) -> Optional[Element]:\n \"\"\"\n \"\"\"\n sel = el_find(el, path, namespaces) if path else el\n if sel is not None:\n try:\n pel = sel.getparent() # type: ignore\n except AttributeError:\n pel = el\n if pel is None or pel is sel:\n raise LookupError(f\"can't get parent element: {sel!r}\")\n pel.remove(sel)\n return sel" }, { "identifier": "el_iterfind", "path": "epub3/util/xml.py", "snippet": "def el_iterfind(\n el: Element, \n path: Optional[str] = None, \n /, \n namespaces: Optional[Mapping] = None, \n) -> Iterator[Element]:\n \"\"\"\n \"\"\"\n if not path or path in (\".\", \"*..\", \"*...\", \"./.\"):\n return iter((el,))\n nsmap: Optional[Mapping]\n if USE_BUILTIN_XML:\n nsmap = namespaces\n else:\n nsmap = el.nsmap\n if namespaces:\n nsmap.update(namespaces)\n if nsmap and (None in nsmap or \"\" in nsmap):\n if any(\n l == \"[\" and r != \"@\" \n for l, r in pairwise(m[0] for m in xpath_tokenizer_re.finditer(path))\n ):\n uri = get(nsmap, None) or get(nsmap, \"\") or \"*\"\n path = generalize_elementpath(path, uri=uri)\n nsmap = {k: v for k, v in items(nsmap) if k and v}\n return el.iterfind(path, nsmap) # type: ignore" }, { "identifier": "el_set", "path": "epub3/util/xml.py", "snippet": "def el_set(\n el: Element, \n path: Optional[str] = None, \n /, \n name: Optional[str] = None, \n attrib: Optional[Mapping] = None, \n text: Optional[str] = None, \n tail: Optional[str] = None, \n namespaces: Optional[Mapping] = None, \n merge: bool = False, \n) -> Element:\n \"\"\"\n \"\"\"\n sel = el_find(el, path, namespaces) if path else el\n if sel is not None:\n if text is None and tail is None and not attrib:\n return sel\n try:\n nsmap = sel.nsmap # type: ignore\n except:\n nsmap = None\n (_el_setmerge if merge else _el_set)(sel, attrib, text, tail, nsmap, namespaces)\n elif name is not None:\n if name == \"\":\n name = path\n sel = el_add(el, cast(str, name), attrib=attrib, text=text, tail=tail, namespaces=namespaces)\n else:\n raise LookupError(f\"element not found: {el!r}.find({path!r}) is None\")\n return sel" }, { "identifier": "undefined", "path": "epub3/util/undefined.py", "snippet": "class UndefinedType:\r\n def __new__(cls, /):\r\n def __init_subclass__(cls, /, **kwargs):\r\n def __eq__(self, other, /):\r" } ]
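The context snippets above end with the `sup` helper from epub3/util/helper.py, whose docstring describes probing exponentially upward and then bisecting back down to the smallest unused integer. The short sketch below is only a reading aid for that helper: the `taken` set is invented for illustration, and it assumes the `epub3` package is importable.

from epub3.util.helper import sup

# Illustrative only: `taken` stands in for whatever data backs the `exists` predicate.
taken = {1, 2, 3, 5}

# Smallest integer >= 1 for which taken.__contains__ returns False.
print(sup(taken.__contains__))      # -> 4
# Starting the search at 5 skips past the occupied slot and lands on 6.
print(sup(taken.__contains__, 5))   # -> 6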
import errno import io import os import os.path as ospath import posixpath from copy import deepcopy from datetime import datetime from fnmatch import translate as wildcard_translate from functools import cached_property, partial from inspect import getfullargspec, isclass from io import IOBase, TextIOWrapper from operator import methodcaller from os import fsdecode, remove, stat, stat_result, PathLike from pathlib import PurePosixPath from posixpath import join as joinpath, normpath from pprint import pformat from re import compile as re_compile, escape as re_escape, Pattern from shutil import copy, copyfileobj from typing import cast, Any, Callable, Container, Mapping, MutableMapping, Optional from types import MappingProxyType from uuid import uuid4 from warnings import warn from weakref import WeakKeyDictionary, WeakValueDictionary from urllib.parse import quote, unquote from zipfile import ZipFile, ZIP_STORED from .util.file import File, RootFS, TemporaryFS, OPEN_MODES from .util.helper import guess_media_type, values, items, sup from .util.proxy import proxy_property, ElementAttribProxy, ElementProxy, NAMESPACES from .util.remap import remap_links from .util.stream import PyLinq from .util.xml import el_add, el_del, el_iterfind, el_set from .util.undefined import undefined, UndefinedType from lxml.etree import fromstring, tostring, _Element as Element, _ElementTree as ElementTree # type: ignore from xml.etree.ElementTree import fromstring, tostring, Element, ElementTree # type: ignore
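The two XML import lines in the import_statement above are lifted out of a try/except guard that is still visible further down in all_code as a bare `try:` / `except ModuleNotFoundError:` pair. A minimal reconstruction of that guard, given here only as a reading aid and not as part of the dataset record, would be:

# Reconstruction (assumed from the order of the two import lines and the bare guard in all_code).
try:
    # Prefer lxml; its element classes are the private _Element/_ElementTree names.
    from lxml.etree import fromstring, tostring, _Element as Element, _ElementTree as ElementTree  # type: ignore
except ModuleNotFoundError:
    # Fall back to the standard library, which exposes the same call surface used here.
    from xml.etree.ElementTree import fromstring, tostring, Element, ElementTree  # type: ignore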
13902
elif isinstance(id_or_attrib, str): id = id_or_attrib itemref = super().get(id) if itemref is None: self.add(id, attrs) else: itemref.merge(attrs) else: self._proxy.merge(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if isinstance(id_or_attrib, Item): id_or_attrib = id_or_attrib._attrib["id"] if attrs: if isinstance(id_or_attrib, Itemref): itemref = id_or_attrib if itemref not in self: raise LookupError(f"no such itemref: {itemref!r}") itemref.update(attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib itemref = super().get(id) if itemref is None: self.add(id, attrs) else: itemref.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self class ePub(ElementProxy): __protected_keys__ = ("unique-identifier", "version") __optional_keys__ = ("dir", "id", "prefix", "xml:lang") __cache_get_key__ = False def __init__( self, /, path=None, workroot=None, maketemp=True, generate_id=None, init_opf=None, ): if path and ospath.lexists(path): self._zfile = zfile = ZipFile(path) contenter_xml = zfile.read("META-INF/container.xml") match = fromstring(contenter_xml).find( '{*}rootfiles/{*}rootfile[@media-type="application/oebps-package+xml"][@full-path]', ) if match is None: raise FileNotFoundError(errno.ENOENT, "no opf file specified in container.xml") self._opf_path = opf_path = unquote(match.attrib["full-path"]) self._opf_dir, self._opf_name = opf_dir, _ = posixpath.split(opf_path) root = fromstring(zfile.read(opf_path)) else: self._opf_path = "OEBPS/content.opf" self._opf_dir = "OEBPS" self._opf_name = "content.opf" if init_opf is None: content_opf = b'''\ <?xml version="1.0" encoding="utf-8"?> <package version="3.0" unique-identifier="BookId" xmlns="http://www.idpf.org/2007/opf"> <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf"> <dc:identifier id="BookId" opf:scheme="UUID">urn:uuid:%(uuid)s</dc:identifier> <dc:language>en</dc:language> <dc:title></dc:title> <meta property="dcterms:modified">%(mtime)s</meta> </metadata> <manifest /> <spine /> </package>''' % { b"uuid": bytes(str(uuid4()), "utf-8"), b"mtime": bytes(datetime.now().strftime("%FT%XZ"), "utf-8") } elif callable(init_opf): content_opf = init_opf() elif isinstance(init_opf, str): content_opf = bytes(init_opf, "utf-8") else: content_opf = init_opf root = fromstring(content_opf) super().__init__(root) self._path = path self._workroot = workroot self._maketemp = maketemp if generate_id is None: self._generate_id = None else: try: argcount = generate_id.__code__.co_argcount except AttributeError: argcount = len(getfullargspec(generate_id).args) if argcount == 0: self._generate_id = lambda href, seen_ids: generate_id() elif argcount == 1: self._generate_id = lambda href, seen_ids: generate_id(href) else: self._generate_id = generate_id self.metadata self.manifest self.spine def __del__(self): try: self._zfile.close() except: pass def __getattr__(self, attr, /): return getattr(self.manifest, attr) @cached_property def metadata(self, /):
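In the cropped_code above, ePub.__init__ counts the positional arguments of a user-supplied `generate_id` callable (via `__code__.co_argcount`, with a `getfullargspec` fallback) and wraps 0- and 1-argument callables so that everything is later invoked as `generate_id(href, seen_ids)`. The sketch below is a hypothetical usage example of that normalization; the file name and id schemes are invented and not taken from the dataset.

from uuid import uuid4
from epub3.epub import ePub

# Any of these arities is accepted; ePub adapts them to (href, seen_ids) internally.
book0 = ePub("new-book.epub", generate_id=lambda: uuid4().hex)                   # called as generate_id()
book1 = ePub("new-book.epub", generate_id=lambda href: href.replace("/", "-"))   # called as generate_id(href)
book2 = ePub("new-book.epub", generate_id=lambda href, seen: f"id{len(seen)}")   # called as generate_id(href, seen_ids)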
#!/usr/bin/env python # coding: utf-8 __author__ = "ChenyangGao <https://chenyanggao.github.io>" __version__ = (0, 0, 1) __all__ = ["ePub", "Metadata", "DCTerm", "Meta", "Link", "Manifest", "Item", "Spine", "Itemref"] try: except ModuleNotFoundError: class DCTerm(ElementProxy): pass class Meta(ElementProxy): __protected_keys__ = ("property",) __optional_keys__ = ("dir", "id", "refines", "scheme", "xml:lang") class Link(ElementAttribProxy): __protected_keys__ = ("href", "rel") __optional_keys__ = ("hreflang", "id", "media-type", "properties", "refines") class Item(ElementAttribProxy): __const_keys__ = ("id",) __protected_keys__ = ("href", "media-type") __optional_keys__ = ("fallback", "media-overlay", "properties") __cache_get_state__ = lambda _, manifest: manifest def __init__(self, root: Element, manifest, /): super().__init__(root) self._manifest = manifest def __eq__(self, other, /): if type(self) is not type(other): return NotImplemented return self._manifest is other._manifest and self._attrib["href"] == other._attrib["href"] def __fspath__(self, /): return unquote(self._attrib["href"]) def __hash__(self, /): return hash((self._root, id(self._manifest))) def __setitem__(self, key, value, /): if key == "href": if value is None: raise ValueError("can't set href to None") self.rename(val) else: super().__setitem__(key, value) return self @property def filename(self, /): return PurePosixPath(joinpath(self.home, self)) @property def home(self, /): return PurePosixPath(self._manifest._epub._opf_dir) @property def name(self, /): return self.path.name @property def path(self, /): return PurePosixPath(self) @property def _parent(self, /): return posixpath.dirname(unquote(self._attrib["href"])) @property def parent(self, /): return self.path.parent @property def parents(self, /): return self.path.parents @property def parts(self, /): return self.path.parts @property def stem(self, /): return self.path.stem @property def suffix(self, /): return self.path.suffix @property def suffixes(self, /): return self.path.suffixes def update(self, attrib=None, /, **attrs): if attrib: attrib = dict(attrib) if attrs: attrib.update(attrs) else: attrib = attrs href = attrib.pop("href", None) if href: self.rename(href) if attrib: super().update(attrib) return self def is_relative_to(self, /, *other): return self.path.is_relative_to(*other) def joinpath(self, /, *others): return PurePosixPath(normpath(joinpath(self._parent, *others))) __truediv__ = joinpath def relpath(self, other, /): return PurePosixPath(posixpath.relpath(other, self._parent)) def relative_to(self, /, *other): return self.path.relative_to(*other) def with_name(self, /, name): return self.path.with_name(str(name)) def with_stem(self, /, stem): return self.path.with_stem(str(stem)) def with_suffix(self, /, suffix): return self.path.with_suffix(str(suffix)) def exists(self, /): return self._manifest.exists(self) def is_file(self, /): return self.exists() def is_dir(self, /): return False def is_symlink(self, /): return False def glob(self, /, pattern="*", ignore_case=False): return self._manifest.glob(pattern, self, ignore_case=ignore_case) def rglob(self, /, pattern="", ignore_case=False): return self._manifest.rglob(pattern, self, ignore_case=ignore_case) def iterdir(self, /): return self._manifest.iterdir(self) def match(self, /, path_pattern, ignore_case=False): path_pattern = path_pattern.strip("/") if not path_pattern: return False pattern = joinpath(*posix_glob_translate_iter(path_pattern)) if ignore_case: pattern = "(?i:%s)" % pattern 
return re_compile(pattern).fullmatch(self._attrib["href"]) is not None def open( self, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): return self._manifest.open( self, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, /, buffering=0): return self._manifest.read(self, buffering=buffering) read_bytes = read def read_text(self, /, encoding=None): return self._manifest.read_text(self, encoding=encoding) def remove(self, /): self._manifest.remove(self) return self def rename(self, dest_href, /, repair=False): return self._manifest.rename(self, dest_href, repair=repair) def batch_rename(self, mapper, /, predicate=None, repair=False): return self._manifest.batch_rename(self, mapper, predicate=predicate, repair=repair) def replace(self, href, /): self._manifest.replace(self, href) return self def stat(self, /) -> Optional[stat_result]: return self._manifest.stat(self) def touch(self, /): self._manifest.touch(self) return self unlink = remove def write(self, /, data): return self._manifest.write(self, data) write_bytes = write def write_text(self, /, text, encoding=None, errors=None, newline=None): return self._manifest.write_text(self, text, encoding=encoding, errors=errors, newline=newline) class Itemref(ElementAttribProxy): __const_keys__ = ("idref",) __optional_keys__ = ("id", "linear", "properties") @property def linear(self, /): return "no" if self._attrib.get("linear") == "no" else "yes" @linear.setter def linear(self, value, /): self._attrib["linear"] = "no" if value == "no" else "yes" class Metadata(ElementProxy): __wrap_class_map__ = {"{*}meta": Meta, "{*}": Link, "dc:*": DCTerm} def __repr__(self, /): return f"{super().__repr__()}\n{pformat(self.iter().list())}" @property def info(self, /): return tuple(meta.info for meta in self.iter()) def add( self, name: str = "meta", /, attrib: Optional[Mapping] = None, text: Optional[str] = None, tail: Any = undefined, **_disregards, ): return super().add(name, attrib=attrib, text=text) def dc( self, name: str, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if text_value is not undefined: if find_attrib: find_attrib = {**find_attrib, "": text_value} else: find_attrib = {"": text_value} return self.setfind( "dc:%s" % name, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def meta( self, preds: str = "", /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): return self.setfind( "{*}meta%s" % preds, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def name_meta( self, name, content: Optional[str] = None, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "name": name} else: find_attrib = {"name": name} if content is not None: find_attrib["content"] = content return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def property_meta( self, property, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: 
Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "property": property} else: find_attrib = {"property": property} if text_value is not undefined: find_attrib[""] = text_value return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) class ManifestProxy(ElementAttribProxy): __optional_keys__ = ("id",) class Manifest(dict[str, Item]): def __init__(self, /, root: Element, epub): self._root = root self._attrib = root.attrib self._epub = epub self._proxy = ManifestProxy(root) self._href_to_id: dict[str, str] = {} self._href_to_file: dict[str, File] = {} if len(root): href_to_id = self._href_to_id dangling_items = [] for item in root.iterfind("{*}item"): id = item.attrib.get("id") href = item.attrib.get("href") if id is None or not href: dangling_items.append(item) continue id = cast(str, id) href = cast(str, unquote(href)) super().__setitem__(id, Item(item, self)) href_to_id[href] = id if dangling_items: for item in reversed(dangling_items): root.remove(item) warn(f"removed a dangling item element: {item!r}") zfile = epub.__dict__.get("_zfile") opf_dir = epub._opf_dir if zfile: href_to_file = self._href_to_file for href in href_to_id: zpath = joinpath(opf_dir, href) zinfo = zfile.NameToInfo.get(zpath) if not zinfo or zinfo.is_dir(): warn(f"missing file in original epub: {href!r}") href_to_file[href] = File(str(uuid4()), self._workfs) else: href_to_file[href] = File(zpath, zfile, open_modes="r") def __init_subclass__(self, /, **kwargs): raise TypeError("subclassing is not allowed") def __call__(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") return href if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id[href] except LookupError as e: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") from e return super().__getitem__(id) def __contains__(self, other, /): if isinstance(other, Item): return other._manifest is self and super().__contains__(other["id"]) return super().__contains__(other) def __delitem__(self, key, /): pop = self.pop if isinstance(key, int): el = self._root[key] try: id = el.attrib["id"] except AttributeError: try: self._root.remove(el) except: pass else: pop(id) elif isinstance(key, slice): root = self._root for el in root[key]: try: id = el.attrib["id"] except AttributeError: try: root.remove(el) except: pass else: pop(id, None) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") pop(key["id"]) elif isinstance(key, str): pop(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") return self def __getitem__(self, key, /): def wrap(el): try: if el.tag == "item" or el.tag.endswith("}item"): return Item(el, self) return ElementProxy(el) except AttributeError: return el if isinstance(key, int): return wrap(self._root[key]) elif isinstance(key, slice): return list(map(wrap, self._root[key])) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") return key elif isinstance(key, str): return super().__getitem__(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") def __setitem__(self, id, value, /): if id not in self: raise LookupError(f"no such item: {id!r}") if isinstance(id, Item): item = id else: item 
= super().__getitem__(id) href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return self @cached_property def _workfs(self, /): if self._epub._maketemp: return TemporaryFS(self._epub._workroot) else: return RootFS(self._epub._workroot) @cached_property def href_to_id(self, /): return MappingProxyType(self._href_to_id) @cached_property def href_to_file(self, /): return MappingProxyType(self._href_to_file) @property def home(self, /): return self._epub._opf_dir @property def attrib(self, /): return self._attrib @property def proxy(self, /): return self._proxy @property def info(self, /): return tuple(item.info for item in self.values()) delete = __delitem__ def clear(self, /): self._root.clear() self._href_to_file.clear() self._href_to_id.clear() super().clear() return self def pop(self, id, /, default=undefined): if id not in self: if default is undefined: raise LookupError(f"no such item: {id!r}") return default if isinstance(id, Item): id = id["id"] item = super().pop(id) try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return item def popitem(self, /): id, item = super().popitem() try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return id, item def set(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(href, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return item def setdefault(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(value, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: if isinstance(value, Mapping) and not ("open" in value and callable(value["open"])): item.merge(value) return item def merge(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.merge(attrib=attrs) 
elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.merge(attrs) else: self._proxy.merge(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.update(attrib=attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self #################### SubElement Methods #################### @PyLinq.streamify def filter(self, /, predicate=None): if not callable(predicate): return iter(self.values()) return filter(predicate, self.values()) @PyLinq.streamify def filter_by_attr(self, predicate=None, attr="media-type", /): def activate_predicate(predicate): if predicate is None: return None if callable(predicate): return predicate elif isinstance(predicate, Pattern): return predicate.search elif isinstance(predicate, str): use_false = False if predicate.startswith(r"!"): use_false = True predicate = predicate[1:] predicate_startswith = predicate.startswith if predicate_startswith(r"="): predicate = predicate[1:].__eq__ elif predicate_startswith(r"~"): predicate = methodcaller("__contains__", predicate[1:]) elif predicate_startswith(r"^"): predicate = methodcaller("startswith", predicate[1:]) elif predicate_startswith(r"$"): predicate = methodcaller("endswith", predicate[1:]) elif predicate_startswith(r";"): predicate = lambda s, needle=predicate[1:]: needle in s.split() elif predicate_startswith(r","): predicate = lambda s, needle=predicate[1:]: needle in s.split(",") elif predicate_startswith(r"<"): predicate = re_compile(r"\b"+re_escape(predicate[1:])).search elif predicate_startswith(r">"): predicate = re_compile(re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"|"): predicate = re_compile(r"\b"+re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"*"): predicate = re_compile(wildcard_translate(predicate[1:])).fullmatch elif predicate_startswith(r"/"): predicate = re_compile(predicate[1:]).search elif predicate_startswith(r"%"): predicate = re_compile(predicate[1:]).fullmatch else: predicate = predicate.__eq__ if use_false: predicate = lambda s, _pred=predicate: not _pred(s) return predicate elif type(predicate) in (tuple, list): preds = tuple(pred for p in predicate if (pred:=activate_predicate(p)) is not None) if not preds: return None if type(predicate) is tuple: return lambda s, _preds=preds: any(p(s) for p in preds) else: return lambda s, _preds=preds: all(p(s) for p in preds) elif isinstance(predicate, Container): return predicate.__contains__ predicate = activate_predicate(predicate) if predicate is None: return filter(lambda item: attr in item, self.values()) return filter(lambda item: attr in item and predicate(item[attr]), self.values()) @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "item" or el.tag.endswith("}item")): yield ElementProxy(el) continue id = el.attrib.get("id") 
href = el.attrib.get("href") if not href: if id is None or not super().__contains__(id): try: root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass else: item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") else: self.pop(id, None) warn(f"removed an item because of missing href attribute: {item!r}") continue href = unquote(href) if not el.attrib.get("media-type"): el.attrib["media-type"] = guess_media_type(href) if id is None: yield self.add(href) elif super().__contains__(id): item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") yield item else: try: self._root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass def list(self, /, mapfn=None): if mapfn is None: return list(self.iter()) return list(map(mapfn, self.iter())) def audio_iter(self, /): return self.filter_by_attr("^audio/") def css_iter(self, /): return self.filter_by_attr("text/css") def font_iter(self, /): return self.filter_by_attr(("^font/", "^application/font-")) def image_iter(self, /): return self.filter_by_attr("^image/") def javascript_iter(self, /): return self.filter_by_attr(("text/javascript", "application/javascript", "application/ecmascript")) def media_iter(self, /): return self.filter_by_attr(("^audio/", "^image/", "^video/")) def text_iter(self, /): return self.filter_by_attr(("^text/", "$+xml")) def video_iter(self, /): return self.filter_by_attr("^video/") @PyLinq.streamify def html_item_ref_pair_iter(self, /): spine = self._epub.spine for id, itemref in spine.items(): yield self[id], itemref for item in self.filter_by_attr(("text/html", "application/xhtml+xml")): if item["id"] in spine: continue yield item, None #################### File System Methods #################### def add( self, href, /, file=None, fs=None, open_modes="r", id=None, media_type=None, attrib=None, ): if isinstance(href, Item): raise TypeError("can't directly add `Item` object") if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" if href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") uid = str(uuid4()) if id is None: generate_id = self._epub._generate_id if generate_id is None: id = uid else: keys = self.keys() id = generate_id(href, keys) while id in keys: nid = generate_id(href, keys) if nid == id: i = sup(lambda i: f"{i}_{nid}" in keys) id = f"{i}_{nid}" break id = nid if id in self: raise LookupError(f"id already exists: {id!r}") attrib = dict(attrib) if attrib else {} attrib["id"] = id attrib["href"] = quote(href, safe=":/?&=#") if media_type: attrib["media-type"] = media_type if fs is not None: file = File(file, fs=fs, open_modes=open_modes) elif file is None: file = File(uid, self._workfs) elif isinstance(file, IOBase) or hasattr(file, "read") and not hasattr(file, "open"): file0 = file file = File(uid, self._workfs) test_data = file0.read(0) if test_data == b"": copyfileobj(file0, self._workfs.open(uid, "wb")) elif test_data == "": attrib.setdefault("media-type", "text/plain") copyfileobj(file0, self._workfs.open(uid, "w")) else: raise TypeError(f"incorrect read behavior: {file0!r}") else: file = File(file, open_modes=open_modes) if not attrib.get("media-type"): attrib["media-type"] = guess_media_type(href) item = Item(el_add(self._root, "item", attrib=attrib, 
namespaces=NAMESPACES), self) super().__setitem__(id, item) self._href_to_id[href] = id self._href_to_file[href] = file return item def change( self, href, /, file=None, fs=None, open_modes="r", id=None, media_type=None, attrib=None, ): if fs is self._workfs: raise OSError(errno.EINVAL, f"Remapping the file that in the working fs is not supported, use `rename` instead: {fs!r}") if href in self.href_to_id: item = self[self.href_to_id[href]] if attrib: item.update(attrib) if media_type: item.media_type = media_type try: self.href_to_file[href].remove() except: pass self._href_to_file[href] = File(file, fs, open_modes) return item else: return self.add( href, file=file, fs=fs, open_modes=open_modes, id=id, media_type=media_type, attrib=attrib, ) def exists(self, href, /): if isinstance(href, Item): return href in self if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" return href in self._href_to_id @PyLinq.streamify def glob(self, pattern="*", dirname="", ignore_case=False): pattern = pattern.strip("/") if not pattern: return if isinstance(dirname, Item): dirname = posixpath.dirname(unquote(href._attrib["href"])) else: dirname = dirname.strip("/") if dirname: dirname = re_escape(dirname) pattern = joinpath(dirname, *posix_glob_translate_iter(pattern)) if ignore_case: pattern = "(?i:%s)" % pattern matches = re_compile(pattern).fullmatch for href, id in self._href_to_id.items(): if not matches(href): continue try: yield super().__getitem__(id) except KeyError: pass @PyLinq.streamify def iterdir(self, /, dirname=""): if isinstance(dirname, Item): dirname = posixpath.dirname(unquote(href._attrib["href"])) else: dirname = dirname.strip("/") for href, id in self._href_to_id.items(): if posixpath.dirname(href) != dirname: continue try: yield super().__getitem__(id) except KeyError: pass def open( self, href, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): if mode not in OPEN_MODES: raise ValueError(f"invalid open mode: {mode!r}") if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" href_to_file = self._href_to_file if href in self._href_to_id: if "x" in mode: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") file = href_to_file.get(href) uid = str(uuid4()) if file is None: href_to_file[href] = file = File(uid, self._workfs) elif not file.check_open_mode(mode): if "w" not in mode: try: fsrc = file.open("rb", buffering=0) except FileNotFoundError: if "r" in mode: raise else: with fsrc: copyfileobj(fsrc, self._workfs.open(uid, "wb")) href_to_file[href] = file = File(uid, self._workfs) elif "r" in mode: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") else: item = self.add(href) file = href_to_file[href] if "b" not in mode and encoding is None: encoding = "utf-8" return file.open( mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, href, /, buffering=0): with self.open(href, "rb", buffering=buffering) as f: return f.read() read_bytes = read def read_text(self, href, /, encoding=None): with self.open(href, "r", encoding=encoding) as f: return f.read() def remove(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, 
(bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") item = super().pop(id, None) if item is not None: try: self._root.remove(item._root) except: pass file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass def _rename(self, item, href, dest_href, /): try: id = self._href_to_id[dest_href] = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") if item is None: item = super().__getitem__(id) item._attrib["href"] = quote(dest_href, safe=":/?&=#") self._href_to_file[dest_href] = self._href_to_file.pop(href, None) def rename(self, href, dest_href, /, repair=False): result = {} if isinstance(href, Item): item = href if item not in self: raise LookupError(f"no such item: {item!r}") href = unquote(item._attrib["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" item = None if isinstance(dest_href, (bytes, PathLike)): dest_href = fsdecode(dest_href) else: dest_href = str(dest_href) assert (dest_href := dest_href.strip("/")), "empty href" result["pathpair"] = (href, dest_href) if href != dest_href: if dest_href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"target file exists: {dest_href!r}") self._rename(item, href, dest_href) if repair: result["repairs"] = remap_links(self, (href, dest_href)) return result def batch_rename(self, mapper, /, predicate=None, repair=False): result = {} result["pathmap"] = pathmap = {} result["fails"] = fails = {} if not callable(mapper) and isinstance(mapper, Mapping): def mapper(item, m=mapper): href = unquote(item["href"]) try: return m[href] except LookupError: return href if predicate is None: predicate = mapper if not callable(predicate) and isinstance(predicate, Mapping): predicate = lambda item, m=predicate: unquote(item["href"]) in m for item in self.filter(predicate): try: href, dest_href = self.rename(item, mapper(item))["pathpair"] if href != dest_href: pathmap[href] = dest_href except Exception as e: fails[unquote(item._attrib["href"])] = e if pathmap and repair: result["repairs"] = remap_links(self, pathmap) return result def replace(self, href, dest_href, /): if isinstance(href, Item): item = href if item not in self: raise LookupError(f"no such item: {item!r}") href = unquote(item._attrib["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" if href not in self._href_to_id: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") item = None if isinstance(dest_href, Item): dest_item = dest_href if dest_item not in self: raise LookupError(f"no such item: {dest_item!r}") dest_href = unquote(dest_item["href"]) else: if isinstance(dest_href, (bytes, PathLike)): dest_href = fsdecode(dest_href) else: dest_href = str(dest_href) assert (dest_href := dest_href.strip("/")), "empty href" dest_item = None if href == dest_href: return if dest_item is not None: del self[dest_item] elif dest_href in self._href_to_id: del self[self._href_to_id[dest_href]] self._rename(item, href, dest_href) def rglob(self, pattern="", dirname="", ignore_case=False): if pattern: pattern = joinpath("**", pattern.lstrip("/")) else: pattern = "**" return self.glob(pattern, dirname, 
ignore_case) def stat(self, href, /) -> Optional[stat_result]: if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" if href not in self._href_to_id: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") try: stat = self._href_to_file[href].stat except (AttributeError, LookupError): return None if callable(stat): return stat() return None def touch(self, href, /): try: self.open(href, "rb", buffering=0).close() except: self.open(href, "wb", buffering=0).close() unlink = remove def write(self, href, /, data): need_close = True if isinstance(data, File): fsrc = data.open("rb", buffering=0) elif callable(getattr(data, "read", None)): fsrc = data need_close = False elif isinstance(data, (str, PathLike)): fsrc = open(data, "rb", buffering=0) else: content = memoryview(data) with self.open(href, "wb") as f: return f.write(content) try: fsrc_read = fsrc.read test_data = fsrc_read(0) if test_data == "": fsrc_read = lambda n, read=fsrc_read: bytes(read(n), "utf-8") elif test_data: raise TypeError(f"incorrect read behavior: {fsrc!r}") with self.open(href, "wb") as fdst: fdst_write = fdst.write n = 0 while (buf := fsrc_read(1 << 16)): n += fdst_write(buf) return n finally: if need_close: fsrc.close() write_bytes = write def write_text(self, href, /, text, encoding=None, errors=None, newline=None): with self.open(href, "w", encoding=encoding, errors=errors, newline=newline) as f: return f.write(text) class SpineProxy(ElementAttribProxy): __optional_keys__ = ("id", "page-progression-direction") class Spine(dict[str, Itemref]): def __init__(self, root: Element, /, manifest: Manifest): self._root = root self._attrib = root.attrib self._proxy = SpineProxy(root) self._manifest = manifest if len(root): dangling_itemrefs = [] for itemref in root.iterfind("{*}itemref"): idref = itemref.attrib.get("idref") if idref is None or idref not in manifest: dangling_itemrefs.append(itemref) continue super().__setitem__(cast(str, idref), Itemref(itemref)) if dangling_itemrefs: for itemref in reversed(dangling_itemrefs): warn(f"removed a dangling item element: {itemref!r}") root.remove(itemref) def __init_subclass__(self, /, **kwargs): raise TypeError("subclassing is not allowed") def __call__(self, id, /, attrib=None): if isinstance(id, Item): id = id._attrib["id"] if isinstance(id, Itemref): if id not in self: raise LookupError(f"no such itemref: {id!r}") itemref = id else: itemref = super().get(id) if not attrib: return itemref if itemref is None: if id not in self._manifest: raise LookupError(f"no such item: {id!r}") itemref = self._add(id, attrib) else: itemref.update(attrib) return itemref def __contains__(self, id, /): if isinstance(id, Itemref): return super().get(id._attrib["idref"]) is id return super().__contains__(id) def __delitem__(self, key, /): pop = self.pop if isinstance(key, Itemref): if key not in self: raise LookupError(f"no such itemref: {key!r}") key = key._attrib["idref"] elif isinstance(key, Item): key = key._attrib["id"] if isinstance(key, str): pop(key, None) elif isinstance(key, int): el = self._root[key] try: id = el.attrib["idref"] except AttributeError: try: self._root.remove(el) except: pass else: pop(id) elif isinstance(key, slice): root = self._root for el in root[key]: try: id = el.attrib["idref"] except AttributeError: try: root.remove(el) except: pass else: pop(id, None) 
else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`, `Itemref`") return self def __getitem__(self, key, /): def wrap(el): try: if el.tag == "itemref" or el.tag.endswith("}itemref"): return Itemref(el) return ElementProxy(el) except AttributeError: return el if isinstance(key, Itemref): if key not in self: raise LookupError(f"no such itemref: {key!r}") return key if isinstance(key, Item): key = key._attrib["id"] if isinstance(key, str): return super().__getitem__(key) elif isinstance(key, int): return wrap(self._root[key]) elif isinstance(key, slice): return list(map(wrap, self._root[key])) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`, `Itemref`") def __setitem__(self, id, attrib, /): if isinstance(key, Item): key = key._attrib["id"] if isinstance(key, Itemref): if key not in self: raise LookupError(f"no such itemref: {key!r}") itemref = key else: itemref = super().get(id) if itemref is None: self.add(key, attrib=attrib) else: itemref.update(attrib) return self @property def attrib(self, /): return self._attrib @property def manifest(self, /): return self._manifest @property def proxy(self, /): return self._proxy @property def info(self, /): return tuple(itemref.info for itemref in self.values()) delete = __delitem__ def _add(self, id, /, attrib=None): if attrib: attrib = dict(attrib, idref=id) else: attrib = {"idref": id} itemref = Itemref(el_add(self._root, "itemref", attrib=attrib, namespaces=NAMESPACES)) super().__setitem__(id, itemref) return itemref def add(self, id, /, attrib=None): if isinstance(id, Itemref): raise TypeError("can't directly add `Itemref` object") if isinstance(id, Item): id = id._attrib["id"] elif id not in self._manifest: raise LookupError(f"no such id in manifest: {id!r}") if super().__contains__(id): raise LookupError(f"id already exists: {id!r}") return self._add(id, attrib) def clear(self, /): self._root.clear() super().clear() return self @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "itemref" or el.tag.endswith("}itemref")): yield ElementProxy(el) continue idref = el.attrib.get("idref") if idref is None or idref not in self._manifest: try: root.remove(el) warn(f"removed a dangling itemref element: {el!r}") except: pass elif idref not in self: itemref = self._add(idref) yield itemref else: itemref = self[idref] if itemref._root is not el: raise RuntimeError(f"different itemref elements {el!r} and {itemref._root!r} share the same id {idref!r}") yield itemref def list(self, /, mapfn=None): if mapfn is None: return list(self.iter()) return list(map(mapfn, self.iter())) def pop(self, id, /, default=undefined): if isinstance(id, Item): id = id._attrib["id"] if isinstance(id, Itemref): if id not in self: if default is undefined: raise LookupError(f"no such itemref: {id!r}") return default itemref = id super().__delitem__(itemref._attrib["idref"]) else: if id not in self: if default is undefined: raise LookupError(f"no such itemref: {id!r}") return default itemref = super().pop(id) try: self._root.remove(itemref._root) except: pass return itemref def popitem(self, /): id, itemref = super().popitem() try: self._root.remove(itemref._root) except: pass return id, itemref def set(self, id, /, attrib=None): if isinstance(id, Item): id = id._attrib["id"] if isinstance(id, Itemref): if id not in self: raise LookupError(f"no such itemref: {id!r}") itemref = id else: itemref = super().get(id) if itemref is None: return self.add(id, attrib) itemref.update(attrib) return 
itemref def setdefault(self, id, /, attrib=None): if isinstance(id, Item): id = id._attrib["id"] if isinstance(id, Itemref): if id not in self: raise LookupError(f"no such itemref: {id!r}") itemref = id else: itemref = super().get(id) if itemref is None: return self.add(id, attrib) itemref.merge(attrib) return itemref def merge(self, id_or_attrib=None, /, **attrs): if isinstance(id_or_attrib, Item): id_or_attrib = id_or_attrib._attrib["id"] if attrs: if isinstance(id_or_attrib, Itemref): itemref = id_or_attrib if itemref not in self: raise LookupError(f"no such itemref: {itemref!r}") itemref.merge(attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib itemref = super().get(id) if itemref is None: self.add(id, attrs) else: itemref.merge(attrs) else: self._proxy.merge(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if isinstance(id_or_attrib, Item): id_or_attrib = id_or_attrib._attrib["id"] if attrs: if isinstance(id_or_attrib, Itemref): itemref = id_or_attrib if itemref not in self: raise LookupError(f"no such itemref: {itemref!r}") itemref.update(attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib itemref = super().get(id) if itemref is None: self.add(id, attrs) else: itemref.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self class ePub(ElementProxy): __protected_keys__ = ("unique-identifier", "version") __optional_keys__ = ("dir", "id", "prefix", "xml:lang") __cache_get_key__ = False def __init__( self, /, path=None, workroot=None, maketemp=True, generate_id=None, init_opf=None, ): if path and ospath.lexists(path): self._zfile = zfile = ZipFile(path) contenter_xml = zfile.read("META-INF/container.xml") match = fromstring(contenter_xml).find( '{*}rootfiles/{*}rootfile[@media-type="application/oebps-package+xml"][@full-path]', ) if match is None: raise FileNotFoundError(errno.ENOENT, "no opf file specified in container.xml") self._opf_path = opf_path = unquote(match.attrib["full-path"]) self._opf_dir, self._opf_name = opf_dir, _ = posixpath.split(opf_path) root = fromstring(zfile.read(opf_path)) else: self._opf_path = "OEBPS/content.opf" self._opf_dir = "OEBPS" self._opf_name = "content.opf" if init_opf is None: content_opf = b'''\ <?xml version="1.0" encoding="utf-8"?> <package version="3.0" unique-identifier="BookId" xmlns="http://www.idpf.org/2007/opf"> <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf"> <dc:identifier id="BookId" opf:scheme="UUID">urn:uuid:%(uuid)s</dc:identifier> <dc:language>en</dc:language> <dc:title></dc:title> <meta property="dcterms:modified">%(mtime)s</meta> </metadata> <manifest /> <spine /> </package>''' % { b"uuid": bytes(str(uuid4()), "utf-8"), b"mtime": bytes(datetime.now().strftime("%FT%XZ"), "utf-8") } elif callable(init_opf): content_opf = init_opf() elif isinstance(init_opf, str): content_opf = bytes(init_opf, "utf-8") else: content_opf = init_opf root = fromstring(content_opf) super().__init__(root) self._path = path self._workroot = workroot self._maketemp = maketemp if generate_id is None: self._generate_id = None else: try: argcount = generate_id.__code__.co_argcount except AttributeError: argcount = len(getfullargspec(generate_id).args) if argcount == 0: self._generate_id = lambda href, seen_ids: generate_id() elif argcount == 1: self._generate_id = lambda href, seen_ids: generate_id(href) 
else: self._generate_id = generate_id self.metadata self.manifest self.spine def __del__(self): try: self._zfile.close() except: pass def __getattr__(self, attr, /): return getattr(self.manifest, attr) @cached_property def metadata(self, /):
return Metadata(el_set(self._root, "{*}metadata", "metadata", attrib={
17
2023-11-20 14:46:41+00:00
16k
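The record above centers on an EPUB package API whose `Manifest.filter_by_attr` accepts compact predicate strings (`=` exact, `^` prefix, `$` suffix, `~` substring, `*` shell wildcard, `/` regex, with a leading `!` for negation). Below is a rough, standalone sketch of that convention, not the library's own code: `make_predicate` is a hypothetical helper, only a subset of prefixes is reproduced, and `fnmatch.translate` stands in for whatever wildcard helper the original uses.

```python
import re
from fnmatch import translate as wildcard_translate  # stand-in for the library's own helper


def make_predicate(spec: str):
    """Simplified sketch of the prefix convention used by filter_by_attr above."""
    negate = spec.startswith("!")
    if negate:
        spec = spec[1:]
    if spec.startswith("="):
        pred = spec[1:].__eq__                                     # exact match
    elif spec.startswith("^"):
        pred = lambda s, p=spec[1:]: s.startswith(p)               # prefix match
    elif spec.startswith("$"):
        pred = lambda s, p=spec[1:]: s.endswith(p)                 # suffix match
    elif spec.startswith("~"):
        pred = lambda s, p=spec[1:]: p in s                        # substring match
    elif spec.startswith("*"):
        pred = re.compile(wildcard_translate(spec[1:])).fullmatch  # shell wildcard
    elif spec.startswith("/"):
        pred = re.compile(spec[1:]).search                         # regular expression
    else:
        pred = spec.__eq__                                         # bare string: exact match
    return (lambda s: not pred(s)) if negate else pred


media_types = ["image/png", "text/css", "application/xhtml+xml", "font/woff2"]
print([m for m in media_types if make_predicate("^image/")(m)])   # ['image/png']
print([m for m in media_types if make_predicate("$+xml")(m)])     # ['application/xhtml+xml']
print([m for m in media_types if make_predicate("!^image/")(m)])  # everything except images
```

The implementation shown (flattened) in the record additionally supports `;`, `,`, `<`, `>`, `|`, and `%` prefixes and can combine several predicates via tuples or lists.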
ymp5078/AI-SAM
segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. 
Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[\n None, :, :, :\n ]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. 
For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(\n point_coords, dtype=torch.float, device=self.device\n )\n labels_torch = torch.as_tensor(\n point_labels, dtype=torch.int, device=self.device\n )\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(\n mask_input, dtype=torch.float, device=self.device\n )\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. 
If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(\n low_res_masks, self.input_size, self.original_size\n )\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) 
to generate an embedding.\"\n )\n assert (\n self.features is not None\n ), \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. 
For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = 
diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np
import torch
import cv2  # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
from typing import Any, Dict, List, Optional, Tuple

from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)
from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
11,119
# Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) data["rles"] = mask_to_rle_pytorch(data["masks"]) del data["masks"] return data @staticmethod def postprocess_small_regions( mask_data: MaskData, min_area: int, nms_thresh: float ) -> MaskData: """ Removes small disconnected regions and holes in masks, then reruns box NMS to remove any new duplicates. Edits mask_data in place. Requires open-cv as a dependency. """ if len(mask_data["rles"]) == 0: return mask_data # Filter small disconnected regions and holes new_masks = [] scores = [] for rle in mask_data["rles"]: mask = rle_to_mask(rle)
# -*- coding: utf-8 -*- # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [ coco_encode_rle(rle) for rle in mask_data["rles"] ] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) data["rles"] = mask_to_rle_pytorch(data["masks"]) del data["masks"] return data @staticmethod def postprocess_small_regions( mask_data: MaskData, min_area: int, nms_thresh: float ) -> MaskData: """ Removes small disconnected regions and holes in masks, then reruns box NMS to remove any new duplicates. Edits mask_data in place. Requires open-cv as a dependency. """ if len(mask_data["rles"]) == 0: return mask_data # Filter small disconnected regions and holes new_masks = [] scores = [] for rle in mask_data["rles"]: mask = rle_to_mask(rle)
mask, changed = remove_small_regions(mask, min_area, mode="holes")
13
2023-11-26 23:42:53+00:00
16k
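Usage note for the record above: a minimal sketch of driving the automatic mask generator whose constructor fields and generate() docstring appear in the code field. It assumes the public segment-anything package layout (sam_model_registry, SamAutomaticMaskGenerator) and an illustrative checkpoint path and image file, none of which are stated in the record itself.

import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

# Illustrative checkpoint; model type and path are assumptions, not taken from the record.
sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")

# Constructor arguments mirror the fields stored in __init__ above.
generator = SamAutomaticMaskGenerator(
    sam,
    points_per_side=32,
    pred_iou_thresh=0.88,
    stability_score_thresh=0.95,
    min_mask_region_area=100,     # triggers postprocess_small_regions()
    output_mode="binary_mask",
)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8
masks = generator.generate(image)

# Each returned record carries the keys documented in the generate() docstring.
for m in masks[:3]:
    print(m["bbox"], m["area"], m["predicted_iou"], m["stability_score"])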
sophiaalthammer/alforrankers
matchmaker/utils/input_pipeline.py
[ { "identifier": "ConditionalQueryGenerationInferenceReader", "path": "matchmaker/dataloaders/query_generation_inference_loader.py", "snippet": "class ConditionalQueryGenerationInferenceReader(DatasetReader):\n \"\"\"\n Read a tsv file containing a passage collection.\n \n Expected format for each input line: <doc_id>\\t<doc_sequence_string>\n The output of ``read`` is a list of ``Instance`` s with the fields:\n doc_tokens: ``TextField`` \n target_query_type: ``MetadataField``\n target_query_length: ``MetadataField``\n\n\n Parameters\n ----------\n tokenizer : ``Tokenizer``, optional\n Tokenizer to use to split the input sequences into words or other kinds of tokens. \n token_indexers : ``Dict[str, TokenIndexer]``, optional\n Indexers used to define input (source side) token representations. Defaults to\n ``{\"tokens\": SingleIdTokenIndexer()}``.\n \"\"\"\n def __init__(self,\n tokenizer: Tokenizer = None,\n token_indexers: Dict[str, TokenIndexer] = None,\n \n max_doc_length:int = -1,\n max_query_length:int = -1,\n\n target_distribution_file:str = None,\n target_number_of_queries_total:int = 1 # ATTENTION, this is per worker!! (divide on your own if using > 1 worker)\n ):\n\n super().__init__(\n manual_distributed_sharding=True,\n manual_multiprocess_sharding=True\n )\n self._tokenizer = tokenizer\n self._token_indexers = token_indexers\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n self.target_number_of_queries_total = target_number_of_queries_total\n\n target_distribution,(target_label_types,target_label_lengths) = approximate_target_distribution_from_file(target_distribution_file)\n\n console = Console()\n\n console.log(\"[QueryGenLoader] Targeting distribution:\",target_distribution*target_number_of_queries_total,\", labels\",(target_label_types,target_label_lengths))\n\n self.target_distribution = target_distribution\n self.target_label_types = target_label_types\n self.target_label_lengths = target_label_lengths\n\n @overrides\n def _read(self, file_path):\n with open(cached_path(file_path), \"r\", encoding=\"utf8\") as data_file:\n #logger.info(\"Reading instances from lines in file at: %s\", file_path)\n for i,line in enumerate(self.shard_iterable(data_file)):\n if i == self.target_number_of_queries_total:\n break\n\n line = line.strip()\n\n if not line:\n continue\n\n line_parts = line.split('\\t')\n if len(line_parts) == 2:\n doc_id, doc_sequence = line_parts\n else:\n raise ConfigurationError(\"Invalid line format: %s\" % (line))\n\n yield self.text_to_instance(doc_id, doc_sequence)\n\n @overrides\n def text_to_instance(self, doc_id:str, doc_sequence: str) -> Instance:\n\n doc_id_field = MetadataField(doc_id)\n\n target_idx = np.random.choice(len(self.target_distribution),1,replace=False,p=self.target_distribution)[0]\n\n concat_sequence = (\":query_group\"+str(self.target_label_types[target_idx]) + \" \"+ str(self.target_label_lengths[target_idx]) + \" \" + doc_sequence)\n\n doc_tokenized = self._tokenizer.tokenize(concat_sequence, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n doc_field = TransformerTextField(**doc_tokenized,padding_token_id=self._tokenizer._tokenizer.pad_token_id)\n\n return Instance({\n \"doc_id\":doc_id_field,\n \"doc_tokens\":doc_field,\n \"target_query_type\":MetadataField(self.target_label_types[target_idx]),\n \"target_query_length\":MetadataField(self.target_label_lengths[target_idx])})" }, { "identifier": "PseudoLabelDatasetLoader", 
"path": "matchmaker/dataloaders/pseudo_label_training_loader.py", "snippet": "class PseudoLabelDatasetLoader():\n \"\"\"\n \n \"\"\"\n\n def __init__(\n self,\n\n query_file: str,\n collection_file: str,\n rankings_with_teacher_scores: str,\n\n selection_type: str, # values: \"scores\", \"scores-non-fixed\", \"top-rank\"\n min_pos_score: float,\n max_diff_to_be_pos: float,\n min_diff_to_neg: float,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences = False,\n random_seed=42,\n ):\n\n self.query_file = query_file\n self.collection_file = collection_file\n self.rankings_with_teacher_scores = rankings_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.selection_type = selection_type\n self.min_pos_score = min_pos_score\n self.max_diff_to_be_pos = max_diff_to_be_pos\n self.min_diff_to_neg = min_diff_to_neg\n\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n self.uniqe_pos_only = False\n\n def __iter__(self) -> Iterator[TensorDict]:\n \n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[PseudoLabel] Loading rankings from:\",self.rankings_with_teacher_scores)\n self.pos_by_qid = defaultdict(list)\n self.neg_by_qid = defaultdict(list)\n\n stat_total_pos = 0\n stat_total_neg = 0\n with open(self.rankings_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n current_q_id = \"\"\n current_top_score = 0\n for line in qf:\n ls = line.split() # pos_score<t>neg_score<t>pos_id<t>neg_id\n if current_q_id != ls[0]:\n current_q_id = ls[0]\n current_top_score = float(ls[3])\n if self.selection_type == \"scores\" or self.selection_type == \"scores-non-fixed\":\n if current_top_score >= self.min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],float(ls[3])))\n stat_total_pos+=1\n\n elif self.selection_type == \"top-rank\": \n self.pos_by_qid[ls[0]].append((ls[1],float(ls[3])))\n stat_total_pos+=1\n else:\n score = float(ls[3])\n if self.selection_type == \"scores\":\n if score >= current_top_score - self.max_diff_to_be_pos and score >= self.min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n elif self.selection_type == \"scores-non-fixed\":\n if score >= current_top_score - self.max_diff_to_be_pos: # TODO apply this fix and score >= min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n 
self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n elif self.selection_type == \"top-rank\": \n if score >= current_top_score - self.max_diff_to_be_pos:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n\n console.log(\"[PseudoLabel] Loading collection from:\",self.collection_file)\n self.collection = {}\n self.collection_ids = []\n with open(self.collection_file, \"r\", encoding=\"utf8\") as cf:\n for line in cf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.collection[ls[0]] = ls[1].rstrip()[:100_000]\n self.collection_ids.append(ls[0])\n\n console.log(\"[PseudoLabel] Loading queries from:\",self.query_file)\n self.queries = {}\n with open(self.query_file, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.queries[ls[0]] = ls[1].rstrip()\n\n self.query_ids = np.array(sorted(list(set(self.pos_by_qid.keys()).intersection(set(self.neg_by_qid.keys())))))\n\n console.log(f\"[PseudoLabel] Done loading! Using {stat_total_pos} positives and {stat_total_neg} negatives for {len(self.query_ids)} queries\")\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n \n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while len(self.query_ids) > query_target_count:\n\n main_instances = []\n\n #while len(main_instances) < self.batch_size:\n\n #q_ids = random.sample(self.query_ids, query_target_count)\n q_id_idxs = random.sample(range(len(self.query_ids)), query_target_count)\n \n query_idx_remove_buffer = [] # only used for self.uniqe_pos_only==True, we need to buffer the removals, \n # otherwise we break the for loop access of already drawn q_ids\n\n for q_idx in q_id_idxs:\n q_id = self.query_ids[q_idx]\n\n #if q_id not in self.pos_by_qid or q_id not in self.neg_by_qid: # need to make sure that we did not just remove the query from the dataset (only for self.uniqe_pos_only==True)\n # continue\n\n pos = random.choice(self.pos_by_qid[q_id])\n neg = random.choice(self.neg_by_qid[q_id])\n\n if self.uniqe_pos_only:\n self.pos_by_qid[q_id].remove(pos) # ok to remove here, because q_id is unique in this for loop\n if len(self.pos_by_qid[q_id]) == 0:\n #del self.pos_by_qid[q_id]\n query_idx_remove_buffer.append(q_idx)\n #self.query_ids.pop(q_idx)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[pos[0]],self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[neg[0]],self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(self.queries[q_id]),\n \"doc_pos_tokens\": self.get_tokenized_document(self.collection[pos[0]]),\n \"doc_neg_tokens\": self.get_tokenized_document(self.collection[neg[0]]),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos[1]))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg[1]))\n\n main_instances.append(Instance(ret_instance))\n\n #if len(main_instances) == self.batch_size:\n # break\n if self.uniqe_pos_only:\n if len(query_idx_remove_buffer) > 0:\n self.query_ids = 
np.delete(self.query_ids,query_idx_remove_buffer)\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch,None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n \n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "PseudoLabelTextDatasetLoader", "path": "matchmaker/dataloaders/pseudo_label_training_loader.py", "snippet": "class PseudoLabelTextDatasetLoader():\n \"\"\"\n\n \"\"\"\n\n def __init__(\n self,\n\n rankings_with_teacher_scores: str,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences=False,\n random_seed=42,\n ):\n\n self.rankings_with_teacher_scores = rankings_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n self.uniqe_pos_only = False\n\n def __iter__(self) -> Iterator[TensorDict]:\n\n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[PseudoLabel] Loading rankings from:\", self.rankings_with_teacher_scores)\n\n self.triples = [] # query_id pos_id neg_id pos_score neg_score\n\n with open(self.rankings_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split('\\t') # pos_score neg_score query_text pos_text neg_text\n self.triples.append((float(ls[0]), float(ls[1]), ls[2], ls[3], ls[4]))\n\n console.log(f\"[TripleId] Done loading! 
Using {len(self.triples)} triples\")\n\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n\n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while True:\n\n main_instances = []\n\n while len(main_instances) < self.batch_size:\n\n pos_score, neg_score, q_text, pos_text, neg_text = random.choice(self.triples)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(q_text, pos_text,\n self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(q_text, neg_text,\n self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(q_text),\n \"doc_pos_tokens\": self.get_tokenized_document(pos_text),\n \"doc_neg_tokens\": self.get_tokenized_document(neg_text),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos_score))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg_score))\n\n main_instances.append(Instance(ret_instance))\n\n if len(main_instances) == self.batch_size:\n break\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch, None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n\n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "TripleIdDatasetLoader", "path": "matchmaker/dataloaders/triple_id_training_loader.py", "snippet": "class TripleIdDatasetLoader():\n \"\"\"\n \n \"\"\"\n\n def __init__(\n self,\n\n query_file: str,\n collection_file: str,\n triples_with_teacher_scores: str,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences = False,\n random_seed=42,\n ):\n\n self.query_file = query_file\n self.collection_file = collection_file\n self.triples_with_teacher_scores = triples_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n def __iter__(self) -> Iterator[TensorDict]:\n \n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, 
tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[TripleId] Loading rankings from:\",self.triples_with_teacher_scores)\n self.triples = [] # query_id pos_id neg_id pos_score neg_score\n\n with open(self.triples_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split() # pos_score neg_score query_id pos_id neg_id\n self.triples.append((ls[2],ls[3],ls[4],float(ls[0]),float(ls[1])))\n\n console.log(\"[TripleId] Loading collection from:\",self.collection_file)\n self.collection = {}\n self.collection_ids = []\n with open(self.collection_file, \"r\", encoding=\"utf8\") as cf:\n for line in cf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.collection[ls[0]] = ls[1].rstrip()[:100_000]\n self.collection_ids.append(ls[0])\n\n console.log(\"[TripleId] Loading queries from:\",self.query_file)\n self.queries = {}\n with open(self.query_file, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.queries[ls[0]] = ls[1].rstrip()\n\n console.log(f\"[TripleId] Done loading! Using {len(self.triples)} triples\")\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n \n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while True:\n\n main_instances = []\n\n while len(main_instances) < self.batch_size:\n\n q_id,pos_id,neg_id,pos_score,neg_score = random.choice(self.triples)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[pos_id],self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[neg_id],self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(self.queries[q_id]),\n \"doc_pos_tokens\": self.get_tokenized_document(self.collection[pos_id]),\n \"doc_neg_tokens\": self.get_tokenized_document(self.collection[neg_id]),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos_score))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg_score))\n\n main_instances.append(Instance(ret_instance))\n\n if len(main_instances) == self.batch_size:\n break\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch,None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n \n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "BlingFireTokenizer", 
"path": "matchmaker/dataloaders/bling_fire_tokenizer.py", "snippet": "class BlingFireTokenizer():\n \"\"\"\n basic tokenizer using bling fire library\n \"\"\"\n\n def tokenize(self, sentence: str) -> List[Token]:\n return [Token(t) for t in text_to_words(sentence).split()]" }, { "identifier": "FastTransformerTokenizer", "path": "matchmaker/dataloaders/transformer_tokenizer.py", "snippet": "class FastTransformerTokenizer():\n \"\"\"\n basic wrapper for an HuggingFace AutoTokenizer\n \"\"\"\n\n def __init__(self, model,add_unique_ids=False,uniqueness_type=\"lower\",create_global_id=False):\n\n if \"t5\" in model:\n self._tokenizer = T5Tokenizer.from_pretrained(model)\n # when generating, we will use the logits of right-most token to predict the next token\n # so the padding should be on the left\n self._tokenizer.padding_side = \"left\"\n self._tokenizer.pad_token = self._tokenizer.eos_token # to avoid an error\n elif \"bart\" in model:\n self._tokenizer = BartTokenizer.from_pretrained(model)\n else:\n self._tokenizer = AutoTokenizer.from_pretrained(model)\n\n self.add_unique_ids = add_unique_ids\n if self.add_unique_ids:\n self.pre_tokenzier = BertPreTokenizer()\n\n from nltk.stem.porter import PorterStemmer\n self.stemmer = PorterStemmer()\n \n self.uniqueness_type = uniqueness_type # or \"stemmed\"\n self.create_global_id = create_global_id\n\n self.stem_cache = {}\n\n def tokenize(self, sentence: str, sentence2: str = None, max_length: int = 512, padding=False, random_spans=False):\n if sentence2 != None:\n seq_tokenized = self._tokenizer(sentence, sentence2,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n\n else:\n if random_spans:\n sentences = nltk.sent_tokenize(sentence)\n sentence_ids = list(range(len(sentences)))\n random.shuffle(sentence_ids)\n sent_length = 0\n sentence = ''\n for id in sentence_ids:\n sent = sentences[id]\n if len(sent.split(' ')) + sent_length < 512:\n sentence = sentence + sent\n sent_length = len(sent.split(' '))\n\n seq_tokenized = self._tokenizer(sentence,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n else:\n seq_tokenized = self._tokenizer(sentence,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n\n #\n # only used for ColBERTer model\n #\n if self.add_unique_ids:\n\n seq_tokenized.data[\"unique_input_ids\"] = torch.unique(seq_tokenized.data[\"input_ids\"])\n \n # these are the wordpiece-subwords\n tf_offsets = seq_tokenized.encodings[0].offsets\n\n # these are the whole-word offsets (subwords are not split yet), but it uses the exact same splitting mechanism\n whole_word_offsets = self.pre_tokenzier.pre_tokenize_str(sentence)\n\n # create unique_token_dict\n whole_word_unique = {}\n for i,(tok,offsets) in enumerate(whole_word_offsets):\n if self.uniqueness_type == \"stemmed\":\n lower_tok = tok.lower()\n if lower_tok not in self.stem_cache:\n tok_transformed = self.stemmer.stem(lower_tok)\n self.stem_cache[lower_tok] = tok_transformed\n else:\n tok_transformed = self.stem_cache[lower_tok]\n else:\n tok_transformed = tok.lower()\n\n whole_word_offsets[i] = (tok_transformed,offsets)\n \n if tok_transformed not in whole_word_unique:\n if self.create_global_id:\n hashed = int.from_bytes(hashlib.sha256(tok_transformed.encode('utf-8')).digest()[:4], 'little', 
signed=False) # 32-bit int\n # 0 is a reserved id for padding, don't think this will happen often though\n if hashed == 0:\n hashed = 1\n \n if hashed < 0 or hashed > 4294967295:\n #if hashed < -2147483648 or hashed > 2147483647:\n print(\"Warning: hash value is too large, will be truncated to 32-bit int\")\n whole_word_unique[tok_transformed] = hashed\n else:\n whole_word_unique[tok_transformed] = len(whole_word_unique) + 1\n\n # map tf_offsets to whole_word_unique\n tf_input_ids_to_whole_word_unique_map = torch.zeros_like(seq_tokenized.data[\"input_ids\"])\n for i,tf_offset in enumerate(tf_offsets[1:-1]): # ignore special tokens\n for whole_word_token,whole_word_offset in whole_word_offsets:\n if tf_offset[0] >= whole_word_offset[0] and tf_offset[1] <= whole_word_offset[1]:\n tf_input_ids_to_whole_word_unique_map[0][i+1] = whole_word_unique[whole_word_token]\n break\n \n # if the tokenizer cuts off the sequence, we might have some tokens that are in the pre-tokenizer, but not mapped\n # because they only appear in the end and where cut -> in this case we just remove them also from the unique list\n # as the main tokenizer is the main anchor point\n skipped_whole_word =[]\n for tok,i in whole_word_unique.items():\n if i not in tf_input_ids_to_whole_word_unique_map[0]:\n skipped_whole_word.append(tok)\n for tok in skipped_whole_word:\n del whole_word_unique[tok]\n\n #\n # this is just sanity checking to make sure that the mapping is correct\n #\n #if (tf_input_ids_to_whole_word_unique_map[0][1:-1] == 0).any():\n # missing_ids = seq_tokenized.data[\"input_ids\"][0][1:-1][tf_input_ids_to_whole_word_unique_map[0][1:-1] == 0]\n # missing_toks = self._tokenizer.convert_ids_to_tokens(missing_ids)\n # if not (len(set(missing_toks)) <= 2 and ((set(missing_toks) == set([\"[PAD]\", \"[SEP]\"])) or missing_toks[0] == \"[PAD]\")):\n # print(\"WARNING: some tokens were not found in the whole_word dictionary\",missing_toks,\"in sentence:\", sentence, \"with offset:\", whole_word_offsets,\"unique_words\", whole_word_unique)\n\n seq_tokenized.data[\"input_ids_to_words_map\"] = tf_input_ids_to_whole_word_unique_map\n seq_tokenized.data[\"unique_words\"] = torch.from_numpy(numpy.array(list(whole_word_unique.values()),dtype=numpy.int64)).unsqueeze(0)\n\n for _, d in seq_tokenized.data.items():\n d.squeeze_(0)\n return seq_tokenized.data" }, { "identifier": "PretrainedBertIndexerNoSpecialTokens", "path": "matchmaker/modules/bert_embedding_token_embedder.py", "snippet": "class PretrainedBertIndexerNoSpecialTokens(PretrainedTransformerIndexer):\n\n \"\"\"\n A ``TokenIndexer`` corresponding to a pretrained BERT model.\n Parameters\n ----------\n pretrained_model: ``str``\n Either the name of the pretrained model to use (e.g. 'bert-base-uncased'),\n or the path to the .txt file with its vocabulary.\n If the name is a key in the list of pretrained models at\n https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/tokenization.py#L33\n the corresponding path will be used; otherwise it will be interpreted as a path or URL.\n use_starting_offsets: bool, optional (default: False)\n By default, the \"offsets\" created by the token indexer correspond to the\n last wordpiece in each word. 
If ``use_starting_offsets`` is specified,\n they will instead correspond to the first wordpiece in each word.\n do_lowercase: ``bool``, optional (default = True)\n Whether to lowercase the tokens before converting to wordpiece ids.\n never_lowercase: ``List[str]``, optional\n Tokens that should never be lowercased. Default is\n ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]'].\n max_pieces: int, optional (default: 512)\n The BERT embedder uses positional embeddings and so has a corresponding\n maximum length for its input ids. Any inputs longer than this will\n either be truncated (default), or be split apart and batched using a\n sliding window.\n truncate_long_sequences : ``bool``, optional (default=``True``)\n By default, long sequences will be truncated to the maximum sequence\n length. Otherwise, they will be split apart and batched using a\n sliding window.\n \"\"\"\n\n def __init__(\n self,\n pretrained_model: str,\n use_starting_offsets: bool = False,\n do_lowercase: bool = True,\n never_lowercase: List[str] = None,\n max_pieces: int = 512,\n truncate_long_sequences: bool = True,\n ) -> None:\n\n bert_tokenizer = PretrainedTransformerTokenizer(pretrained_model, do_lower_case=do_lowercase)\n super().__init__(\n vocab=bert_tokenizer.vocab,\n wordpiece_tokenizer=bert_tokenizer.wordpiece_tokenizer.tokenize,\n namespace=\"bert\",\n use_starting_offsets=use_starting_offsets,\n max_pieces=max_pieces,\n do_lowercase=do_lowercase,\n never_lowercase=never_lowercase,\n start_tokens=[],\n end_tokens=[],\n separator_token=\"[SEP]\",\n truncate_long_sequences=truncate_long_sequences,\n )\n\n def __eq__(self, other):\n if isinstance(other, PretrainedBertIndexerNoSpecialTokens):\n for key in self.__dict__:\n if key == \"wordpiece_tokenizer\":\n # This is a reference to a function in the huggingface code, which we can't\n # really modify to make this clean. So we special-case it.\n continue\n if self.__dict__[key] != other.__dict__[key]:\n return False\n return True\n return NotImplemented" } ]
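A brief sketch of how the FastTransformerTokenizer wrapper from the context list above is typically used, based only on the constructor and tokenize() signature shown in the snippet; the checkpoint name and example strings are illustrative assumptions.

from matchmaker.dataloaders.transformer_tokenizer import FastTransformerTokenizer

# Wraps a HuggingFace AutoTokenizer (T5/BART checkpoints get special handling, see snippet above).
tok = FastTransformerTokenizer("bert-base-uncased")

# Single sequence, truncated to max_length; returns the HuggingFace tensor dict
# (input_ids, attention_mask) with the batch dimension squeezed out.
query = tok.tokenize("what is dense retrieval", max_length=30)
print(query["input_ids"].shape, query["attention_mask"].shape)

# Query/document pair, as consumed by the concatenated ("bert_cat") model input type.
pair = tok.tokenize("what is dense retrieval",
                    "Dense retrieval encodes queries and documents into one vector space ...",
                    max_length=200)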
import torch import numpy import random import torch.multiprocessing as mp from allennlp.data.samplers import BucketBatchSampler, MaxTokensBatchSampler from allennlp.data.vocabulary import Vocabulary from allennlp.data.data_loaders import MultiProcessDataLoader from transformers import T5Tokenizer from allennlp.data.token_indexers import PretrainedTransformerIndexer from allennlp.data.tokenizers import PretrainedTransformerTokenizer from matchmaker.dataloaders.concatenated_reranking_loader import * from matchmaker.dataloaders.concatenated_training_loader import * from matchmaker.dataloaders.independent_reranking_loader import * from matchmaker.dataloaders.independent_training_loader import * from matchmaker.dataloaders.id_sequence_loader import * from matchmaker.dataloaders.mlm_masked_sequence_loader import * from matchmaker.dataloaders.query_generation_inference_loader import ConditionalQueryGenerationInferenceReader from matchmaker.dataloaders.tas_balanced_training_loader import * from matchmaker.dataloaders.pseudo_label_training_loader import PseudoLabelDatasetLoader, PseudoLabelTextDatasetLoader from matchmaker.dataloaders.triple_id_training_loader import TripleIdDatasetLoader from transformers import AutoTokenizer from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer from matchmaker.dataloaders.transformer_tokenizer import FastTransformerTokenizer from matchmaker.modules.bert_embedding_token_embedder import PretrainedBertIndexerNoSpecialTokens from typing import Dict, Tuple, List
12241
max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), 
query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"] reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=max_length, target_distribution_file=run_config["target_distribution_file"], target_number_of_queries_total=run_config["target_number_of_queries_total"]) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def _get_indexer(model_config, max_length): # default values _tokenizer = BlingFireTokenizer() _vocab = Vocabulary() if model_config["token_embedder_type"] == "embedding": _token_indexers = {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)} _vocab = Vocabulary.from_files(model_config["vocab_directory"]) elif model_config["token_embedder_type"] == "bert_embedding" or model_config["token_embedder_type"] == "bert_vectors": _tokenizer = PretrainedTransformerTokenizer(model_config["bert_pretrained_model"], do_lowercase=True, start_tokens=[], end_tokens=[]) _ind = PretrainedBertIndexerNoSpecialTokens(pretrained_model=model_config["bert_pretrained_model"], do_lowercase=True, max_pieces=max_length) _token_indexers = {"tokens": _ind} elif model_config["token_embedder_type"].startswith("bert"): model = model_config["bert_pretrained_model"] if "facebook/dpr" in model: model = "bert-base-uncased" # should be the right one (judging from paper + huggingface doc)
#from tokenizers import ByteLevelBPETokenizer,CharBPETokenizer #from matchmaker.dataloaders.transformer_tokenizer import CustomTransformerTokenizer,CustomTransformerIndexer mp.set_sharing_strategy("file_system") # VERY MUCH needed for linux !! makes everything faster, but tends to break stuff def allennlp_single_sequence_loader(model_config, run_config, _input_file, sequence_type, force_exact_batch_size=False): ''' Load examples from a .tsv file in the single sequence format: id<tab>text (Using allennlp's v2 multiprocess loader) ''' if model_config.get("model_input_type", "") == "mlm": sequence_type == "single_mlm" if sequence_type == "query": max_length = run_config.get("overwrite_max_query_length", model_config["max_query_length"]) min_length = model_config.get("min_query_length",-1) batch_size = run_config["query_batch_size"] split_document=False split_document_window_size=-1 if sequence_type == "single_mlm": max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length", -1) batch_size = run_config.get("collection_batch_size", run_config["batch_size_train"]) make_multiple_of=run_config.get("make_multiple_of",8) mask_probability=run_config.get("mask_probability",0.1) mlm_mask_replace_probability=run_config.get("mlm_mask_replace_probability",0.5) mlm_mask_random_probability=run_config.get("mlm_mask_random_probability",0.5) else: # doc max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length",-1) batch_size = run_config["collection_batch_size"] split_document=run_config.get("split_document",False) split_document_window_size=run_config.get("split_document_window_size",-1) _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max_length) #if model_config.get("model_input_type", "") == "mlm": # reader = MLMMaskedSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=max_length, min_doc_length=min_length, # mask_probability=mask_probability, # mlm_mask_replace_probability=mlm_mask_replace_probability, # mlm_mask_random_probability=mlm_mask_random_probability, # make_multiple_of=make_multiple_of) reader = IdSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, split_document=split_document,split_document_window_size=split_document_window_size, max_seq_length=max_length, min_seq_length=min_length, sequence_type=sequence_type) if force_exact_batch_size: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=int(batch_size)) else: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["seq_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_triple_training_loader(model_config, run_config, _input_file,add_text_to_batch=False): ''' Load training examples (either in the re-ranking text file format or a dynamic loader) (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], 
run_config["max_query_length"])) if run_config.get("dynamic_sampler", False) == False: if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) else: reader = IndependentTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], query_augment_mask_number=run_config["query_augment_mask_number"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) else: #if run_config["dynamic_sampler_type"] == "list": # loader = IrDynamicTripleDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], # qrels_file=run_config["dynamic_qrels_file"], candidate_file=run_config["dynamic_candidate_file"], # batch_size=int(run_config["batch_size_train"]), queries_per_batch=run_config["dynamic_queries_per_batch"], tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], # min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], # data_augment=run_config["train_data_augment"], vocab=_vocab) if run_config["dynamic_sampler_type"] == "tas_balanced": loader = TASBalancedDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], pairs_with_teacher_scores=run_config["dynamic_pairs_with_teacher_scores"], query_cluster_file=run_config["dynamic_query_cluster_file"], batch_size=int(run_config["batch_size_train"]), clusters_per_batch=run_config["dynamic_clusters_per_batch"], tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], pair_balancing_strategy=run_config["tas_balanced_pair_strategy"],random_seed =run_config["random_seed"]) elif run_config["dynamic_sampler_type"] == "pseudo_label": loader = PseudoLabelDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], selection_type=run_config["pseudo_label_selection_type"],min_pos_score=run_config["pseudo_label_min_pos_score"], max_diff_to_be_pos=run_config["pseudo_label_max_diff_to_be_pos"],min_diff_to_neg=run_config["pseudo_label_min_diff_to_neg"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, 
max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "pseudo_labeltext": loader = PseudoLabelTextDatasetLoader(rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, 
num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"] reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=max_length, target_distribution_file=run_config["target_distribution_file"], target_number_of_queries_total=run_config["target_number_of_queries_total"]) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def _get_indexer(model_config, max_length): # default values _tokenizer = BlingFireTokenizer() _vocab = Vocabulary() if model_config["token_embedder_type"] == "embedding": _token_indexers = {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)} _vocab = Vocabulary.from_files(model_config["vocab_directory"]) elif model_config["token_embedder_type"] == "bert_embedding" or model_config["token_embedder_type"] == "bert_vectors": _tokenizer = PretrainedTransformerTokenizer(model_config["bert_pretrained_model"], do_lowercase=True, start_tokens=[], end_tokens=[]) _ind = PretrainedBertIndexerNoSpecialTokens(pretrained_model=model_config["bert_pretrained_model"], do_lowercase=True, max_pieces=max_length) _token_indexers = {"tokens": _ind} elif model_config["token_embedder_type"].startswith("bert"): model = model_config["bert_pretrained_model"] if "facebook/dpr" in model: model = 
"bert-base-uncased" # should be the right one (judging from paper + huggingface doc)
_tokenizer = FastTransformerTokenizer(model,
5
2023-11-21 10:38:22+00:00
16k
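As a usage note for this record's input_pipeline.py, a minimal sketch of calling allennlp_reranking_inference_loader() with hand-built config dictionaries; the dictionary keys are the ones read by the code above, while the concrete values and the candidates.tsv path are illustrative assumptions (the full key set expected by _get_indexer is cut off in the record, so additional keys may be required).

from matchmaker.utils.input_pipeline import allennlp_reranking_inference_loader

model_config = {
    "model_input_type": "concatenated",        # routes to ConcatenatedReRankingDatasetReader
    "token_embedder_type": "bert_cat",
    "bert_pretrained_model": "distilbert-base-uncased",
}
run_config = {
    "max_doc_length": 200, "max_query_length": 30,
    "min_doc_length": -1, "min_query_length": -1,
    "train_qa_spans": False,
    "dataloader_num_workers": 0,
    "batch_size_eval": 32,
}

# Candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text
loader = allennlp_reranking_inference_loader(model_config, run_config, "candidates.tsv")
for batch in loader:        # MultiProcessDataLoader yields padded tensor dicts, batched by token count
    print(batch.keys())
    break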
MICLab-Unicamp/medpseg
medpseg/poly_pipeline.py
[ { "identifier": "PolySeg2DModule", "path": "medpseg/poly_seg_2d_module.py", "snippet": "class PolySeg2DModule(pl.LightningModule):\n '''\n Regarding of the name, also works with 3D networks\n '''\n def __init__(self, hparams):\n '''\n Check starter.py for description of all hparams\n '''\n super().__init__()\n self.save_hyperparameters(hparams)\n\n ####### Hyperparameters used during development, ignore this its confusing #######\n self.pretraining = self.hparams.pretraining\n self.findings_only = getattr(self.hparams, \"findings_only\", False)\n self.weight_decay = getattr(self.hparams, \"weight_decay\", None)\n self.scheduling_factor = getattr(self.hparams, \"scheduling_factor\", None)\n self.scheduling = getattr(self.hparams, \"scheduling\", \"step\")\n self.scratch = getattr(self.hparams, \"scratch\", False)\n self.expand_bifpn = getattr(self.hparams, \"expand_bifpn\", \"conv\")\n self.backbone = getattr(self.hparams, \"backbone\", \"effnet\")\n self.val_3d = getattr(self.hparams, \"val_3d\", False)\n self.gdl = getattr(self.hparams, \"gdl\", False)\n self.bdl = getattr(self.hparams, \"bdl\", False)\n self.focal = getattr(self.hparams, \"focal\", False)\n self.atmbranch = getattr(self.hparams, \"atmbranch\", None)\n self.vesselbranch = getattr(self.hparams, \"vesselbranch\", None)\n self.recbranch = getattr(self.hparams, \"recbranch\", None)\n self.include_bg = getattr(self.hparams, \"include_background\", False)\n self.unet = getattr(self.hparams, \"unet\", False)\n self.unettr = getattr(self.hparams, \"unettr\", False)\n self.poly_level = getattr(self.hparams, \"poly_level\", None)\n self.flag_3d_metric = '_3d' if self.val_3d or self.unettr else ''\n self.excluded_average_metric_keys = [\"volume_similarity\", \"avg_hd\", \"hd\"]\n self.downstream_method = getattr(self.hparams, \"downstream_method\", None)\n self.perceptual_loss = getattr(self.hparams, \"perceptual_loss\", False)\n self.stem_replacement = getattr(self.hparams, \"stem_replacement\", False)\n self.new_latent_space = getattr(self.hparams, \"new_latent_space\", False)\n self.compound_coef = getattr(self.hparams, \"compound_coef\", 4)\n self.consistency = getattr(self.hparams, \"consistency\", False)\n self.imnet_norm = getattr(self.hparams, \"imnet_norm\", False)\n self.learnable_norm = getattr(self.hparams, \"learnable_norm\", False)\n self.circulatory_branch = getattr(self.hparams, \"circulatory_branch\", None)\n self.bifpn_channels = getattr(self.hparams, \"bifpn_channels\", 128)\n self.combined_loss = getattr(self.hparams, \"combined_loss\", False)\n self.sam = getattr(self.hparams, \"sam\", False)\n self.freeze_encoder = getattr(self.hparams, \"freeze_encoder\", False)\n self.batchfy_e2d = getattr(self.hparams, \"batchfy_e2d\", False)\n self.circulatory_regularization = getattr(self.hparams, \"circulatory_regularization\", False)\n self.medseg3d = getattr(self.hparams, \"medseg3d\", False)\n self.fpn_c = getattr(self.hparams, \"fpn_c\", None)\n # Post ATS ideas\n self.soft_circulatory = getattr(self.hparams, \"soft_circulatory\", False)\n self.poi_loss = getattr(self.hparams, \"poi_loss\", False)\n self.nrdice_loss = getattr(self.hparams, \"nrdice_loss\", False)\n self.polyunet25d = getattr(self.hparams, \"polyunet25d\", False)\n self.polyunet3d = getattr(self.hparams, \"polyunet3d\", False)\n self.mccl = getattr(self.hparams, \"mccl\", False)\n self.tversky = getattr(self.hparams, \"tversky\", False)\n self.airway_ths = getattr(self.hparams, \"airway_ths\", 0.5)\n self.vessel_ths = getattr(self.hparams, 
\"vessel_ths\", 0.5)\n self.self_attention = getattr(self.hparams, \"self_attention\", False)\n self.deep_supervision = getattr(self.hparams, \"deep_supervision\", False)\n self.con_detect = getattr(self.hparams, \"con_detect\", False)\n self.celoss = getattr(self.hparams, \"celoss\", False)\n self.large = getattr(self.hparams, \"large\", False)\n self.combined_gdl = getattr(self.hparams, \"combined_gdl\", False)\n self.full_silver = getattr(self.hparams, \"preprocess\", '') == \"full_silver_poly_3levels_circulatory\"\n if self.full_silver:\n print(\"Full silver mode detected, every item on batch must be fullsilver preprocess\")\n ####### Hyperparameters used during development, ignore this its confusing #######\n\n # Determine offset for polymorphic labels depending on poly level\n # Poly level:\n # None: supervised training only\n # 0: self supervised only\n # 2: lung -> unhealthy/healthy\n # 3: unhealthy -> GGO/CON\n self.nlossterms = 0\n if self.poly_level == 3: # Previous logic for this was wrong, changing to count from beginning\n self.simple_offset = 2 # BG + Lung\n self.detailed_offset = 3 # BG + Healthy + Unhealthy\n else:\n self.simple_offset = 2 # BG + Lung\n self.detailed_offset = None # Not present if not poly_level 3\n\n # Redundant argument necessary to not tie module to data preprocessing\n if \"poly_3levels\" in self.hparams.preprocess:\n assert self.poly_level == 3 or self.poly_level == 2\n\n self.two5d = True\n self.model = MEDSeg(self.hparams.nin, self.hparams.seg_nout, apply_sigmoid=False, backbone=self.backbone, expand_bifpn=self.expand_bifpn, pretrained=not self.scratch,\n num_classes_atm=self.atmbranch, num_classes_vessel=self.vesselbranch, num_classes_rec=self.recbranch, stem_replacement=self.stem_replacement, new_latent_space=self.new_latent_space,\n compound_coef=self.compound_coef, imnet_norm=self.imnet_norm, learnable_norm=self.learnable_norm, circulatory_branch=self.circulatory_branch,\n bifpn_channels=self.bifpn_channels, sam_embedding=self.sam, self_attention=self.self_attention, deep_supervision=self.deep_supervision,\n con_detecting=self.con_detect, large=self.large, soft_circulatory=self.soft_circulatory)\n \n self.pretrained_weights = self.hparams.pretrained_weights\n if self.pretrained_weights is not None:\n print(f\"Loading pretrained weights from {self.pretrained_weights}\")\n self.model = PolySeg2DModule.load_from_checkpoint(self.pretrained_weights).model\n\n # Supervised loss\n assert (not(self.combined_loss) or not(self.nrdice_loss)) and (not(self.combined_loss) or not(self.mccl)) and (not(self.nrdice_loss) or not(self.mccl)), \"Cant do combined loss and nrdice loss or combined loss and mccl at the same time\"\n \n if self.combined_loss:\n print(\"Combined Loss\")\n self.lossfn = CombinedLoss(include_background=self.include_bg, cross_entropy=self.celoss, gdl=self.combined_gdl, soft_circulatory=self.soft_circulatory)\n self.dicer = DICEMetric(per_channel_metric=True, check_bounds=False)\n\n print('-'*100 + \n f\"\\nPoly2D Module in the following configuration:\"\n f\"\\npoly_level: {self.poly_level} soft_circulatory: {self.soft_circulatory}\"\n f\"\\nnin: {self.hparams.nin} main_nout: {self.hparams.seg_nout}, DS: {self.deep_supervision}, SA: {self.self_attention}\"\n f\"\\nMEDSeg 3D? 
{self.medseg3d}\\n\" +\n '-'*100)\n\n def save_pt_model(self, path):\n torch.save(self.model.state_dict(), path)\n\n def load_pt_model(self, path):\n self.model.load_state_dict(torch.load(path))\n\n def visual_debug(self, x, y, label):\n pass\n\n def forward(self, x, stacking=False):\n if self.val_3d and not self.training and not stacking: # either training, or bein in val_3d or stacking flag avoids this branch and...\n return real_time_stack_predict(self, x, self.hparams.eval_batch_size, extended_2d=self.hparams.extended_2d, num_workers=self.hparams.nworkers, device=torch.device(\"cpu\") if self.hparams.cpu else x.device)\n else: # ...we return direct slice activations\n y_hat = self.model(x) \n if isinstance(y_hat, dict):\n for k in y_hat.keys():\n if 'atm' in k or 'vessel' in k:\n if self.soft_circulatory:\n y_hat[k] = y_hat[k].softmax(dim=1) \n else:\n y_hat[k] = y_hat[k].sigmoid()\n elif 'main' in k:\n y_hat[k] = y_hat[k].softmax(dim=1)\n else:\n raise ValueError(f\"Unexpected key in MEDSeg return: {k}\")\n if self.hparams.debug and not stacking:\n print(\"y_hat state:\")\n for k, v in y_hat.items():\n print(f\"{k}: {v.shape}\")\n else:\n y_hat = y_hat.softmax(dim=1)\n if self.hparams.debug and not stacking:\n print(f\"y_hat state: {y_hat.shape}\")\n \n return y_hat\n\n # Main branch forms ##################################\n def simple_level(self, y_hat, y, simple, ds, do_loss):\n '''\n Where we train on lung masks only. \n '''\n if self.full_silver and self.training:\n raise RuntimeError(\"Shouldn't be running simple_level on full_silver\")\n \n if isinstance(y_hat, dict):\n lung = y_hat[\"main\"][simple, 1:].sum(dim=1, keepdim=True) # lung is everything after bg summed\n y_hat_simple = torch.cat([y_hat[\"main\"][simple, :1], lung], dim=1) # 2 channel bg + lung on simple cases\n else:\n lung = y_hat[simple, 1:].sum(dim=1, keepdim=True) # lung is everything after bg summed\n y_hat_simple = torch.cat([y_hat[simple, :1], lung], dim=1) # bg + lung on simple cases\n \n # WANING: Boundary Loss deprecated, no significant difference shown \n if self.simple_offset is None: # poly simplification removes unhealthy label\n y_simple = y[simple] \n else:\n y_simple = y[simple, :self.simple_offset] \n NS = y_simple.shape[0]\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n simple_loss = self.lossfn(y_hat_simple, y_simple)\n else:\n simple_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_simple_argmax = y_hat_simple.argmax(dim=1, keepdim=True)\n y_hat_lung = y_hat_simple_argmax == 1\n for ns in range(NS):\n struct_names = [\"lung\"]\n seg_metrics(gts=y_simple[ns, 1:2].cpu().numpy().astype(np.uint8), preds=y_hat_lung.detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for simplified level not implemented\")\n \n return simple_loss\n\n def detailed_level(self, y_hat, y, detailed, ds, do_loss):\n '''\n Where we train on Healthy/Unhealthy masks\n Still supports old 2.5D validation metrics do pretraining project\n '''\n if self.full_silver and self.training:\n raise RuntimeError(\"Shouldn't be running detailed_level on full_silver\")\n \n if isinstance(y_hat, dict): \n if self.poly_level == 3: # if we have ggo and con outputs, reduce then\n 
unhealthy = y_hat[\"main\"][detailed, 2:].sum(dim=1, keepdim=True) # GGO + CON = unhealthy\n y_hat_detailed = torch.cat([y_hat[\"main\"][detailed, :2], unhealthy], dim=1) # Concating BG, Healthy with unhealthy\n else:\n y_hat_detailed = y_hat[\"main\"][detailed]\n else:\n if self.poly_level == 3: # if we have ggo and con outputs, reduce then\n unhealthy = y_hat[detailed, 2:].sum(dim=1, keepdim=True) # GGO + CON = unhealthy\n y_hat_detailed = torch.cat([y_hat[detailed, :2], unhealthy], dim=1) # Concating BG, Healthy with unhealthy\n else:\n y_hat_detailed = y_hat[detailed]\n \n # Logic to separate concatenations on x and y. Kind of complicated\n # Although boundary loss is implemented, early experiments showed it not being signifcantly better so, deprecated.\n if self.detailed_offset is None:\n y_detailed = y[detailed]\n else:\n y_detailed = y[detailed, :self.detailed_offset] \n ND = y_detailed.shape[0]\n\n # Loss can be disabled to accelerate validation\n if do_loss:\n detailed_loss = self.lossfn(y_hat_detailed, y_detailed)\n else:\n detailed_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_detailed_argmax = y_hat_detailed.argmax(dim=1, keepdim=True)\n y_hat_detailed = torch.cat((y_hat_detailed_argmax == 1, y_hat_detailed_argmax == 2), dim=1)\n for nd in range(ND):\n struct_names = [\"healthy\", \"unhealthy\"]\n seg_metrics(gts=y_detailed[nd, 1:3].cpu().numpy().astype(np.uint8), preds=y_hat_detailed[nd, :2].detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n healthy_metric, unhealthy_metric = self.dicer(y_hat_detailed[:, 1:3], y_detailed[:, 1:3])\n self.log(\"healthy_dice\", healthy_metric, on_epoch=True, on_step=False, prog_bar=False)\n self.log(\"unhealthy_dice\", unhealthy_metric, on_epoch=True, on_step=False, prog_bar=False)\n\n return detailed_loss\n\n def separation_level(self, y_hat, y, separation, ds, do_loss):\n '''\n Where we train on separating GGO and Consolidations \n (semi-supervised through threshold + unhealthy label)\n\n One day might be manual labels too\n '''\n if isinstance(y_hat, dict):\n y_hat_separation = y_hat[\"main\"][separation][:, :4]\n else:\n y_hat_separation = y_hat[separation][:, :4]\n\n y_separation = y[separation][:, :4]\n ND = y_separation.shape[0]\n\n # Loss can be disabled to accelerate validation\n if do_loss:\n separation_loss = self.lossfn(y_hat_separation, y_separation)\n else:\n separation_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_separation_argmax = y_hat_separation.argmax(dim=1, keepdim=True)\n y_hat_separation = torch.cat((y_hat_separation_argmax == 2, y_hat_separation_argmax == 3), dim=1)\n for nd in range(ND):\n struct_names = [\"ggo\", \"con\"]\n seg_metrics(gts=y_separation[nd, 2:4].cpu().numpy().astype(np.uint8), preds=y_hat_separation[nd, :2].detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n\n return separation_loss\n ####################################################\n\n # ATM branch computations\n def atm_branch(self, y_hat, y, 
atm, ds, do_loss):\n '''\n where we optimize atm parts of the batch, binary label\n '''\n if self.full_silver and self.training:\n if self.soft_circulatory:\n bg = torch.ones_like(y[atm, 5:6]) - y[atm, 5:6]\n y_airway = torch.cat([bg, y[atm, 5:6]], dim=1)\n y_hat_airway = y_hat[\"atm\"][atm, :2] \n else:\n raise RuntimeError(\"Why are you running full_silver without SoftCirculatory\")\n else:\n if self.soft_circulatory:\n y_airway = y[atm, :2] # Taking one hot map\n y_hat_airway = y_hat[\"atm\"][atm, :2] # output has 2 channels\n else:\n y_airway = y[atm, 1:2] # 0 is BG, taking binary airway map\n y_hat_airway = y_hat[\"atm\"][atm, :1] # output has only 1 channel\n NS = y_airway.shape[0] # nsamples\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n atm_loss = self.lossfn(y_hat_airway, y_airway)\n else:\n atm_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n # Making sure to get the correct activation when softmax (soft_circulatory) is turned on.\n if self.soft_circulatory:\n # Note that this is already 0 and 1 after argmax\n binary_y_hat_airway = y_hat_airway.detach().argmax(dim=1, keepdim=True).cpu().numpy().astype(np.uint8)\n binary_y_airway = y_airway[:, 1:2].cpu().numpy().astype(np.uint8)\n else:\n # Split sigmoid on THS\n binary_y_hat_airway = (y_hat_airway.detach() > self.airway_ths).cpu().numpy().astype(np.uint8)\n binary_y_airway = y_airway[:, 0:1].cpu().numpy().astype(np.uint8)\n assert binary_y_hat_airway.shape[1] == 1 and binary_y_hat_airway.max() <= 1\n\n for ns in range(NS):\n struct_names = [\"airway\"]\n seg_metrics(gts=binary_y_airway[ns], \n preds=binary_y_hat_airway[ns],\n metrics=self.metrics, \n struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for atm not implemented\")\n \n return atm_loss\n\n # Vessel branch computations\n def vessel_branch(self, y_hat, y, vessel, ds, do_loss):\n '''\n where we optimize atm parts of the batch\n '''\n '''\n where we optimize atm parts of the batch, binary label\n '''\n if self.full_silver and self.training:\n if self.soft_circulatory:\n bg = torch.ones_like(y[vessel, 4:5]) - y[vessel, 4:5]\n y_vessel = torch.cat([bg, y[vessel, 4:5]], dim=1)\n y_hat_vessel = y_hat[\"vessel\"][vessel, :2] \n else:\n raise RuntimeError(\"Why are you running full_silver without SoftCirculatory\")\n else:\n if self.soft_circulatory:\n y_vessel = y[vessel, :2] # Taking one hot map\n y_hat_vessel = y_hat[\"vessel\"][vessel, :2] # output has 2 channels\n else:\n y_vessel = y[vessel, 1:2] # 0 is BG, taking binary airway map\n y_hat_vessel = y_hat[\"vessel\"][vessel, :1] # output has only 1 channel\n \n NS = y_vessel.shape[0] # nsamples\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n vessel_loss = self.lossfn(y_hat_vessel, y_vessel)\n else:\n vessel_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n # Making sure to get the correct activation when softmax (soft_circulatory) is turned on.\n if self.soft_circulatory:\n # Note that this is already 0 and 1 after argmax\n binary_y_hat_vessel = y_hat_vessel.detach().argmax(dim=1, keepdim=True).cpu().numpy().astype(np.uint8)\n binary_y_vessel = y_vessel[:, 1:2].cpu().numpy().astype(np.uint8)\n else:\n # Split sigmoid on THS\n binary_y_hat_vessel = 
(y_hat_vessel.detach() > self.vessel_ths).cpu().numpy().astype(np.uint8)\n binary_y_vessel = y_vessel[:, 0:1].cpu().numpy().astype(np.uint8)\n assert binary_y_hat_vessel.shape[1] == 1 and binary_y_hat_vessel.max() <= 1\n\n for ns in range(NS):\n struct_names = [\"vessel\"]\n seg_metrics(gts=binary_y_vessel[ns], \n preds=binary_y_hat_vessel[ns],\n metrics=self.metrics, \n struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for vessel not implemented\")\n \n return vessel_loss\n\n def debug_batch(self, simple, detailed, separation, atm, vessel, y, meta):\n if self.hparams.debug:\n print(f\"Training? {self.training}\")\n print(\"Simple\")\n print(simple)\n print(\"Detailed\")\n print(detailed)\n print(\"Separation\")\n print(separation)\n print(\"ATM\")\n print(atm)\n print(\"Vessel (parse)\")\n print(vessel)\n \n # Assuming B, C, ... format\n preprocess = meta[\"preprocess\"]\n import matplotlib.pyplot as plt\n for i, y_item in enumerate(y):\n item_preprocess = preprocess[i]\n print(y_item.max())\n display_buffer = y_item.cpu().argmax(dim=0).numpy()\n print(display_buffer.max())\n print(f\"Display buffer: {display_buffer.shape}\")\n if os.getenv(\"NSLOTS\") is None:\n if len(display_buffer.shape) == 3:\n pass\n else:\n plt.title(f\"Batch target {i} preprocess {item_preprocess}\")\n plt.imshow(display_buffer)\n plt.show()\n\n def deep_supervision_fn(self, \n loss_fn: Callable, \n key: str, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n index: np.ndarray, \n do_loss: bool):\n loss_acum = []\n \n for i in range(1, 5):\n current_size = (y_hat[key].shape[-2], y_hat[key].shape[-1])\n current_size = (current_size[0]//(2**(i)), current_size[1]//(2**(i)))\n \n transform = Resize(current_size, interpolation=InterpolationMode.NEAREST)\n \n # Craft prediction and target for deep supervision outputs\n new_y_hat = {}\n\n if key == \"main\":\n new_y_hat[key] = y_hat[f\"{key}{i}\"]\n elif key == \"vessel\" or key == \"atm\":\n new_y_hat[key] = y_hat[f\"{key}{i}\"]\n else:\n raise ValueError(f\"Key {key} not valid\")\n\n new_y = transform(y)\n loss = loss_fn(new_y_hat, new_y, index, True, do_loss)\n\n loss_acum.append(loss)\n\n return loss_acum\n\n def compute_loss(self, \n loss_fn: Callable, \n key: str, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n index: np.ndarray, \n do_loss: bool, \n deep_supervision: bool):\n if index.sum() > 0:\n loss = loss_fn(y_hat, y, index, False, do_loss)\n if deep_supervision and self.training:\n loss_acum = self.deep_supervision_fn(loss_fn, key, y_hat, y, index, do_loss)\n # Due to observing good results with only high resolution loss in poly, bumping high resolution weight in optimization\n # To 0.75, with rest of DS contributing to 0.25 of optimization\n loss = ((2**-1)+(2**-2))*loss + (2**-3)*loss_acum[0] + (2**-4)*loss_acum[1] + (2**-5)*loss_acum[2] + (2**-6)*loss_acum[3]\n for i in range(5):\n self.log(f\"{loss_fn.__name__}_deep_supervision_{i}\", loss if i == 0 else loss_acum[i-1], prog_bar=False, on_step=True, on_epoch=True)\n else:\n loss = 0\n\n return loss\n\n def loss_wrapper(self, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n indexes: Dict[str, np.ndarray], \n do_loss: bool, \n deep_supervision: bool):\n simple, detailed, separation, 
atm, vessel = indexes[\"simple\"], indexes[\"detailed\"], indexes[\"separation\"], indexes[\"atm\"], indexes[\"vessel\"]\n\n simple_loss = self.compute_loss(self.simple_level, \"main\", y_hat, y, simple, do_loss, deep_supervision)\n detailed_loss = self.compute_loss(self.detailed_level, \"main\", y_hat, y, detailed, do_loss, deep_supervision)\n separation_loss = self.compute_loss(self.separation_level, \"main\", y_hat, y, separation, do_loss, deep_supervision)\n atm_loss = self.compute_loss(self.atm_branch, \"atm\", y_hat, y, atm, do_loss, deep_supervision)\n vessel_loss = self.compute_loss(self.vessel_branch, \"vessel\", y_hat, y, vessel, do_loss, deep_supervision)\n\n if do_loss and simple_loss == 0 and detailed_loss == 0 and atm_loss == 0 and separation_loss == 0 and vessel_loss == 0:\n print(\">>>>>>>>>>>>>WARNING: Malformed batch, didn't find any level of polymorphism!<<<<<<<<<<<<<\")\n\n return simple_loss, detailed_loss, separation_loss, atm_loss, vessel_loss\n\n def polymorphic_loss_metrics(self, \n y: torch.Tensor, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n meta: Dict[str, List[str]], \n do_loss: bool = True):\n '''\n ####### Polymorphic training #############\n # Indexes whole batch and perform loss computations separately\n '''\n detailed = np.logical_or(np.logical_or(np.logical_or(np.logical_or(np.array(meta[\"preprocess\"]) == \"seg_raw_new\", np.array(meta[\"preprocess\"]) == \"seg_raw\"), np.array(meta[\"preprocess\"]) == \"msd_seg\"), np.array(meta[\"preprocess\"]) == \"seg_raw_new_hu\"), np.array(meta[\"preprocess\"]) == \"msd_seg_hu\") # Level 2 polymorphism, healthy/unhealthy annotation, cancer\n simple = np.logical_or(np.logical_or(np.array(meta[\"preprocess\"]) == \"pretrain_preprocessing\", np.array(meta[\"preprocess\"]) == \"classification_pretrain_preprocessing\"), np.array(meta[\"preprocess\"]) == \"pretrain_preprocessing_hu\") # Level 1 polymorphism, lung annotation\n separation = np.logical_or(np.array(meta[\"preprocess\"]) == \"separation\", np.array(meta[\"preprocess\"]) == \"manual_split_msc_hu\") # Level 3 polymorphism detect artificial con/ggo separation and correction with transform\n atm = np.logical_or(np.array(meta[\"preprocess\"]) == \"new_atm\", np.array(meta[\"preprocess\"]) == \"new_atm_hu\") # Auxiliary task, airway segmentation\n vessel = np.logical_or(np.array(meta[\"preprocess\"]) == \"parse\", np.array(meta[\"preprocess\"]) == \"parse_hu\") # Auxiliary task, vessel segmentation\n\n if self.full_silver and self.training:\n # The case where every batch item has everything, from teacher network labeling\n separation = np.array([True]*y.shape[0])\n atm = np.array([True]*y.shape[0])\n vessel = np.array([True]*y.shape[0])\n\n self.debug_batch(simple, detailed, separation, atm, vessel, y, meta)\n\n indexes = {\"simple\": simple, \"detailed\": detailed, \"separation\": separation, \"atm\": atm, \"vessel\": vessel}\n\n return self.loss_wrapper(y_hat, y, indexes, do_loss, deep_supervision=self.deep_supervision)\n\n def supervised_loss(self, y, y_hat, meta, prestr):\n '''\n Does all the dozens of losses involved in this training\n This function also computes and logs metrics internally. 
Only losses are returned to compute the final loss\n '''\n simple_loss, detailed_loss, separation_loss, atm_loss, vessel_loss = self.polymorphic_loss_metrics(y=y, y_hat=y_hat, meta=meta, do_loss=True)\n \n loss = simple_loss + detailed_loss + separation_loss + atm_loss + vessel_loss\n if loss is not None:\n if self.training:\n if simple_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}simple_loss\", simple_loss, on_step=True, on_epoch=True)\n if detailed_loss > 0: \n self.nlossterms += 1\n self.log(f\"{prestr}detailed_loss\", detailed_loss, on_step=True, on_epoch=True)\n if separation_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}separation_loss\", separation_loss, on_step=True, on_epoch=True)\n if atm_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}atm_loss\", atm_loss, on_step=True, on_epoch=True)\n if vessel_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}vessel_loss\", vessel_loss, on_step=True, on_epoch=True)\n \n self.log(f\"{prestr}loss\", loss, on_step=True, on_epoch=True)\n else:\n if simple_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}simple_loss{self.flag_3d_metric}\", simple_loss, on_step=True, on_epoch=True)\n if detailed_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}detailed_loss{self.flag_3d_metric}\", detailed_loss, on_step=True, on_epoch=True)\n if separation_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}separation_loss{self.flag_3d_metric}\", separation_loss, on_step=True, on_epoch=True)\n if atm_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}atm_loss{self.flag_3d_metric}\", atm_loss, on_step=True, on_epoch=True)\n if vessel_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}vessel_loss\", vessel_loss, on_step=True, on_epoch=True)\n \n self.log(f\"{prestr}loss{self.flag_3d_metric}\", loss, on_step=True, on_epoch=True)\n\n return loss\n\n def training_step(self, train_batch, batch_idx):\n '''\n Training step does different things if on exclusive pretraining mode or \n doing traditional supervision.\n\n We only need to return loss for optimizer, metrics are not computed\n '''\n self.nlossterms = 0\n x, y, meta = train_batch\n self.visual_debug(x, y, \"Training\")\n \n y_hat = None\n\n if self.poly_level != 0: # zero polymorphic means pretraining only\n # Traditional supervision\n if y_hat is None:\n y_hat = self.forward(x)\n\n supervised_loss = self.supervised_loss(y=y, y_hat=y_hat, meta=meta, prestr='')\n self.log(\"supervised_loss\", supervised_loss, on_step=True, on_epoch=True)\n else:\n supervised_loss = 0\n \n final_loss = supervised_loss/self.nlossterms\n self.log(\"nlossterms\", self.nlossterms, on_step=True, on_epoch=True)\n self.log(\"loss\", final_loss, on_step=True, on_epoch=True)\n\n if final_loss == 0:\n raise ValueError(\"Loss is equal to 0. Something is misconfigured.\")\n\n return final_loss # for outside optimization\n\n def validation_step(self, val_batch, batch_idx):\n '''\n Validation step does different things if on exclusive pretraining mode or \n doing traditional supervision\n\n There is no return but metrics are computed in 3D (takes a while)\n for pretraining loss is used as a validation metric. 
\n\n When using boundary loss, we are not computing it in 3D validation.\n '''\n self.nlossterms = 0\n x, y, meta = val_batch\n self.visual_debug(x, y, \"Validation\")\n \n y_hat = None\n preproc = meta[\"preprocess\"][0]\n if preproc == \"pretrain_preprocessing\" and self.val_3d:\n print(f\"Skipping no label 3D validation {preproc}\")\n return\n \n \n if self.poly_level != 0:\n # Traditional supervision\n if y_hat is None:\n y_hat = self.forward(x)\n \n # Compute loss and metrics on CPU due to val_3d memory usage\n if self.val_3d:\n if isinstance(y_hat, dict):\n for _, value in y_hat.items():\n if value.device == torch.device(\"cpu\"):\n y = y.to(value.device)\n break\n elif y_hat.device == torch.device(\"cpu\"):\n y = y.to(y_hat.device)\n \n supervised_loss = self.supervised_loss(y=y, y_hat=y_hat, meta=meta, prestr=\"val_\")\n else:\n supervised_loss = 0\n \n # We only compute validation loss when not using val_3d, since 3D validation loss is very heavy on gpu[\n if self.nlossterms != 0:\n final_loss = supervised_loss/self.nlossterms\n self.log(\"val_nlossterms\", self.nlossterms, on_step=True, on_epoch=True)\n self.log(\"val_supervised_loss\", supervised_loss, on_step=True, on_epoch=True)\n self.log(\"val_loss\", final_loss, on_step=True, on_epoch=True)\n \n def on_validation_epoch_start(self):\n '''\n Start of validation epoch tasks:\n Initialize metric dictionary and list of IDs\n '''\n # Reset metric dict\n if self.val_3d:\n self.metrics: Dict = defaultdict(lambda: defaultdict(list))\n \n def on_validation_epoch_end(self):\n '''\n End of epoch tasks:\n - Increment BDL weights\n - Print results so far in terminal (stdout) for backup logging\n '''\n if self.bdl:\n self.lossfn.increment_weights()\n\n if self.trainer.fast_dev_run or self.trainer.sanity_checking:\n print(\"Fast dev run or sanity checking detected, not logging\")\n elif not self.pretraining and self.val_3d:\n for key, value in self.metrics.items():\n print(f\"\\n{key}\")\n selected_metrics = {\"names\": [], \"values\": []}\n for metric, metric_value in value.items():\n np_metric_value = np.array(metric_value)\n mean = np_metric_value.mean() \n std = np_metric_value.std() \n print(f\"{key} {metric}: {mean}+-{std}\")\n \n # Stopped logging std for every metric, too much not very useful data on neptune\n # self.logger.experiment[f\"training/{key}_{metric}_3d_std\"].log(std)\n \n if metric not in self.excluded_average_metric_keys:\n if \"error\" in metric:\n selected_metrics[\"names\"].append(f\"1 - {metric}\")\n selected_metrics[\"values\"].append(1 - mean)\n else:\n selected_metrics[\"names\"].append(metric)\n selected_metrics[\"values\"].append(mean)\n \n np_selected_metrics = np.array(selected_metrics[\"values\"])\n np_selected_metrics_mean = np_selected_metrics.mean()\n np_selected_metrics_std = np_selected_metrics.std()\n print(f\"Building end-of-epoch composite metric:\")\n for metric, value in zip(selected_metrics[\"names\"], selected_metrics[\"values\"]):\n print(f\"{metric}: {value}\")\n print(f\"{key}_composite_metric: {np_selected_metrics_mean} +- {np_selected_metrics_std}\")\n \n self.logger.experiment[f\"training/{key}_composite_metric\"].log(np_selected_metrics_mean)\n self.logger.experiment[f\"training/{key}_composite_metric_std\"].log(np_selected_metrics_std)\n \n\n def configure_optimizers(self):\n '''\n Select optimizer and scheduling strategy according to hparams.\n '''\n opt = getattr(self.hparams, \"opt\", \"Adam\")\n optimizer = get_optimizer(opt, self.model.parameters(), self.hparams.lr, 
wd=self.weight_decay)\n print(f\"Opt: {opt}, Weight decay: {self.weight_decay}\")\n\n if self.scheduling == \"poly\":\n print(\"Polynomial LR\")\n # scheduler = PolynomialLR(optimizer, total_iters=self.hparams.max_epochs, power=0.9, verbose=True)\n elif self.scheduling == \"step\" and self.scheduling_factor is None:\n print(\"Not using any scheduler\")\n return optimizer\n elif self.scheduling_factor is not None and self.scheduling == \"step\":\n print(f\"Using step LR {self.scheduling_factor}!\")\n scheduler = StepLR(optimizer, 1, self.scheduling_factor, verbose=True)\n return [optimizer], [scheduler]\n elif self.scheduling == \"cosine\":\n print(f\"Using CosineAnnealingLR with tmax {self.scheduling_factor}!\")\n scheduler = CosineAnnealingLR(optimizer, T_max=self.scheduling_factor, verbose=True)\n return [optimizer], [scheduler]" }, { "identifier": "E2DStackDataset", "path": "medpseg/eval_2d_utils.py", "snippet": "class E2DStackDataset():\n '''\n Speed up evaluation time slice stacking with dataloader compatible dataset\n '''\n def __init__(self, volume, extended_2d):\n self.volume = volume\n self.limits = [0, volume.shape[2] - 1 ]\n self.extended_2d = extended_2d\n \n def __len__(self):\n return self.volume.shape[2]\n\n def __getitem__(self, i):\n if self.extended_2d is None:\n input_slice = self.volume[:, :, i]\n else:\n central_slice = self.volume[:, :, i]\n input_slice = []\n for extend_i in range(-self.extended_2d, self.extended_2d + 1):\n if extend_i == 0:\n input_slice.append(central_slice)\n continue\n\n new_i = i + extend_i\n if new_i > self.limits[1]:\n new_i = self.limits[1]\n if new_i < self.limits[0]:\n new_i = self.limits[0]\n \n input_slice.append(self.volume[:, :, new_i])\n input_slice = torch.cat(input_slice, dim=1)\n '''\n plt.figure(figsize=(12, 6))\n plt.subplot(1, 3, 1)\n plt.imshow(input_slice[0, 0].detach().cpu().numpy(), cmap=\"gray\")\n plt.subplot(1, 3, 2)\n plt.imshow(input_slice[0, 1].detach().cpu().numpy(), cmap=\"gray\")\n plt.subplot(1, 3, 3)\n plt.imshow(input_slice[0, 2].detach().cpu().numpy(), cmap=\"gray\")\n plt.show()\n '''\n return input_slice[0]\n\n def get_dataloader(self, batch_size, pin_memory, num_workers):\n return DataLoader(self, batch_size=batch_size, pin_memory=pin_memory, num_workers=num_workers)" }, { "identifier": "argon_cpu_count", "path": "medpseg/eval_2d_utils.py", "snippet": "def argon_cpu_count() -> int:\n if os.getenv(\"NSLOTS\") is not None:\n return int(os.getenv(\"NSLOTS\"))\n else:\n return cpu_count()" } ]
import os import torch import numpy as np import cc3d import SimpleITK as sitk from medpseg.poly_seg_2d_module import PolySeg2DModule from medpseg.eval_2d_utils import E2DStackDataset, argon_cpu_count from torch.nn import functional as F from tqdm import tqdm from collections import defaultdict from operator import itemgetter from typing import Dict, Optional from multiprocessing import Queue
10858
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction '''
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction '''
e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count())
1
2023-11-21 20:03:33+00:00
16k
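
For orientation, the gold next_line of the record above only constructs the slice-wise dataloader that poly_stack_predict iterates over; the rest of that function is not part of this record. Below is a minimal, hypothetical sketch of how such a stacking loop could proceed, written only against the E2DStackDataset and argon_cpu_count snippets shown in the record's context field. The function name poly_stack_predict_sketch, the plain-tensor return of model(...), and the final permute back to a (1, C, D, H, W) volume are illustrative assumptions, not the medpseg implementation.

import torch

from medpseg.eval_2d_utils import E2DStackDataset, argon_cpu_count


def poly_stack_predict_sketch(model, volume, batch_size, device=torch.device("cuda:0")):
    # Build the slice-wise dataloader exactly as the gold next_line of this record does.
    e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(
        batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count()
    )

    slice_outputs = []
    model.eval()
    with torch.no_grad():
        for input_slice in e2d_stack_dataloader:
            # Assumption: the network returns a (B, C, H, W) activation tensor here;
            # the real PolySeg2DModule may instead return a dict of per-branch outputs.
            y_hat = model(input_slice.to(device))
            slice_outputs.append(y_hat.cpu())

    # Re-stack the per-slice predictions along the original depth axis
    # (E2DStackDataset slices dim 2 of the input volume).
    stacked = torch.cat(slice_outputs, dim=0)           # (D, C, H, W)
    return stacked.permute(1, 0, 2, 3).unsqueeze(0)     # (1, C, D, H, W)
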
DLYuanGod/TinyGPT-V
minigpt4/datasets/builders/image_text_pair_builder.py
[ { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "BaseDatasetBuilder", "path": "minigpt4/datasets/builders/base_dataset_builder.py", "snippet": "class BaseDatasetBuilder:\n train_dataset_cls, eval_dataset_cls = None, None\n\n def __init__(self, cfg=None):\n super().__init__()\n\n if cfg is None:\n # help to create datasets from default config.\n self.config = load_dataset_config(self.default_config_path())\n elif isinstance(cfg, str):\n self.config = load_dataset_config(cfg)\n else:\n # when called from task.build_dataset()\n self.config = cfg\n\n self.data_type = self.config.data_type\n\n self.vis_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n self.text_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n\n def build_datasets(self):\n # download, split, etc...\n # only called on 1 GPU/TPU in distributed\n\n if is_main_process():\n self._download_data()\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n # at this point, all the annotations and image/videos should be all downloaded to the specified locations.\n logging.info(\"Building datasets...\")\n datasets = self.build() # dataset['train'/'val'/'test']\n\n return datasets\n\n def build_processors(self):\n vis_proc_cfg = self.config.get(\"vis_processor\")\n txt_proc_cfg = self.config.get(\"text_processor\")\n\n if vis_proc_cfg is not None:\n vis_train_cfg = vis_proc_cfg.get(\"train\")\n vis_eval_cfg = vis_proc_cfg.get(\"eval\")\n\n self.vis_processors[\"train\"] = self._build_proc_from_cfg(vis_train_cfg)\n self.vis_processors[\"eval\"] = self._build_proc_from_cfg(vis_eval_cfg)\n\n if txt_proc_cfg is not None:\n txt_train_cfg = txt_proc_cfg.get(\"train\")\n txt_eval_cfg = txt_proc_cfg.get(\"eval\")\n\n self.text_processors[\"train\"] = self._build_proc_from_cfg(txt_train_cfg)\n self.text_processors[\"eval\"] = self._build_proc_from_cfg(txt_eval_cfg)\n\n @staticmethod\n def _build_proc_from_cfg(cfg):\n return (\n registry.get_processor_class(cfg.name).from_config(cfg)\n if cfg is not None\n else None\n )\n\n @classmethod\n def default_config_path(cls, type=\"default\"):\n return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])\n\n def _download_data(self):\n self._download_ann()\n self._download_vis()\n\n def _download_ann(self):\n \"\"\"\n Download annotation files if necessary.\n All the vision-language datasets should have annotations of unified format.\n\n storage_path can be:\n (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.\n (2) basename/dirname: will be suffixed with base name 
of URL if dirname is provided.\n\n Local annotation paths should be relative.\n \"\"\"\n anns = self.config.build_info.annotations\n\n splits = anns.keys()\n\n cache_root = registry.get_path(\"cache_root\")\n\n for split in splits:\n info = anns[split]\n\n urls, storage_paths = info.get(\"url\", None), info.storage\n\n if isinstance(urls, str):\n urls = [urls]\n if isinstance(storage_paths, str):\n storage_paths = [storage_paths]\n\n assert len(urls) == len(storage_paths)\n\n for url_or_filename, storage_path in zip(urls, storage_paths):\n # if storage_path is relative, make it full by prefixing with cache_root.\n if not os.path.isabs(storage_path):\n storage_path = os.path.join(cache_root, storage_path)\n\n dirname = os.path.dirname(storage_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if os.path.isfile(url_or_filename):\n src, dst = url_or_filename, storage_path\n if not os.path.exists(dst):\n shutil.copyfile(src=src, dst=dst)\n else:\n logging.info(\"Using existing file {}.\".format(dst))\n else:\n if os.path.isdir(storage_path):\n # if only dirname is provided, suffix with basename of URL.\n raise ValueError(\n \"Expecting storage_path to be a file path, got directory {}\".format(\n storage_path\n )\n )\n else:\n filename = os.path.basename(storage_path)\n\n download_url(url=url_or_filename, root=dirname, filename=filename)\n\n def _download_vis(self):\n\n storage_path = self.config.build_info.get(self.data_type).storage\n storage_path = utils.get_cache_path(storage_path)\n\n if not os.path.exists(storage_path):\n warnings.warn(\n f\"\"\"\n The specified path {storage_path} for visual inputs does not exist.\n Please provide a correct path to the visual inputs or\n refer to datasets/download_scripts/README.md for downloading instructions.\n \"\"\"\n )\n\n def build(self):\n \"\"\"\n Create by split datasets inheriting torch.utils.data.Datasets.\n\n # build() can be dataset-specific. 
Overwrite to customize.\n \"\"\"\n self.build_processors()\n\n build_info = self.config.build_info\n\n ann_info = build_info.annotations\n vis_info = build_info.get(self.data_type)\n\n datasets = dict()\n for split in ann_info.keys():\n if split not in [\"train\", \"val\", \"test\"]:\n continue\n\n is_train = split == \"train\"\n\n # processors\n vis_processor = (\n self.vis_processors[\"train\"]\n if is_train\n else self.vis_processors[\"eval\"]\n )\n text_processor = (\n self.text_processors[\"train\"]\n if is_train\n else self.text_processors[\"eval\"]\n )\n\n # annotation path\n ann_paths = ann_info.get(split).storage\n if isinstance(ann_paths, str):\n ann_paths = [ann_paths]\n\n abs_ann_paths = []\n for ann_path in ann_paths:\n if not os.path.isabs(ann_path):\n ann_path = utils.get_cache_path(ann_path)\n abs_ann_paths.append(ann_path)\n ann_paths = abs_ann_paths\n\n # visual data storage path\n vis_path = os.path.join(vis_info.storage, split)\n\n if not os.path.isabs(vis_path):\n # vis_path = os.path.join(utils.get_cache_path(), vis_path)\n vis_path = utils.get_cache_path(vis_path)\n\n if not os.path.exists(vis_path):\n warnings.warn(\"storage path {} does not exist.\".format(vis_path))\n\n # create datasets\n dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls\n datasets[split] = dataset_cls(\n vis_processor=vis_processor,\n text_processor=text_processor,\n ann_paths=ann_paths,\n vis_root=vis_path,\n )\n\n return datasets" }, { "identifier": "LaionDataset", "path": "minigpt4/datasets/datasets/laion_dataset.py", "snippet": "class LaionDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUAlignDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUAlignDataset(CaptionDataset):\n\n def __getitem__(self, index):\n\n # TODO this assumes image input, not general enough\n ann = self.annotation[index]\n\n img_file = '{}.jpg'.format(ann[\"image_id\"])\n image_path = os.path.join(self.vis_root, img_file)\n image = 
Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n caption = ann[\"caption\"]\n\n return {\n \"image\": image,\n \"answer\": caption,\n \"image_id\": self.img_ids[ann[\"image_id\"]],\n }" }, { "identifier": "TextCapDataset", "path": "minigpt4/datasets/datasets/text_caps.py", "snippet": "class TextCapDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n 'Briefly describe this image.',\n 'Provide a concise depiction of this image.',\n 'Present a short description of this image.',\n 'Summarize this image in a few words.',\n 'A short image caption:',\n 'A short image description:',\n 'A photo of ',\n 'An image that shows ',\n 'Write a short description for the image. ',\n 'Write a description for the photo.',\n 'Provide a description of what is presented in the photo.',\n 'Briefly describe the content of the image.',\n 'Can you briefly explain what you see in the image?',\n 'Could you use a few words to describe what you perceive in the photo?',\n 'Please provide a short depiction of the picture.',\n 'Using language, provide a short account of the image.',\n 'Use a few words to illustrate what is happening in the picture.',\n ]\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n\n def __len__(self):\n return len(self.ann[\"data\"])\n\n\n def __getitem__(self, index):\n info = self.ann[\"data\"][index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n caption = info[\"caption_str\"]\n caption = self.text_processor(caption)\n instruction = \"<Img><ImageHere></Img> [caption] {} \".format(random.choice(self.instruction_pool))\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": caption,\n }" }, { "identifier": "LlavaDetailDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n \n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaReasonDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaReasonDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n\n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaConversationDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.ann=[]\n\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "UnnaturalDataset", "path": "minigpt4/datasets/datasets/unnatural_instruction.py", "snippet": "class UnnaturalDataset(Dataset):\n def __init__(self, text_processor, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index][\"instances\"][0]\n instruction = info[\"instruction_with_input\"]\n constraints = info[\"constraints\"]\n answer = info[\"output\"]\n if constraints != None:\n instruction = instruction+\" \"+constraints\n\n return {\n \"instruction_input\": self.text_processor(instruction),\n \"answer\": self.text_processor(answer),\n }" }, { "identifier": "MultiTaskConversationDataset", "path": "minigpt4/datasets/datasets/multitask_conversation.py", "snippet": "class MultiTaskConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "GroundedDetailDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class GroundedDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[grounding] please describe this image in details',\n '[grounding] describe this image as detailed as possible',\n '[grounding] summarize this image in details',\n '[grounding] give a thorough description of what you see in this image',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n # image_file = 'COCO_train2014_{}.jpg'.format(info['image_id'])\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['grounded_caption']\n instruction = random.choice(self.instruction_pool)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "CaptionToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class CaptionToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"caption\"]\n answer = info[\"output\"]\n\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"CaptionToObject instruction\", instruction)\n print(\"CaptionToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "PhraseToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class PhraseToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"phrase\"]\n answer = \"<p>\"+input+\"</p> \"+info[\"bbox\"]\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"PhraseToObject instruction\", instruction)\n print(\"PhraseToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "ReferVisualGenomeDataset", "path": "minigpt4/datasets/datasets/vg_dataset.py", "snippet": "class ReferVisualGenomeDataset(Dataset):\n def __init__(self, vis_processor, text_processor, data_dir):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.data_dir = data_dir\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n all_regions = local.get_all_region_descriptions(self.data_dir)\n all_regions = [region for regions in all_regions for region in regions]\n\n # follow OFA practice, only regions smaller than 16384 pixels are used for refer\n self.regions = [region for region in all_regions if region.width * region.height < 16384]\n\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.regions)\n\n def preprocess(self, index):\n region = self.regions[index]\n image_file = region.image.url.split('/')[-2:]\n image_path = os.path.join(self.data_dir, *image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [100,100]\n\n sample_sentence = region.phrase\n refer_sentence = self.text_processor(sample_sentence)\n\n bbox = [region.x, region.y, region.width, region.height]\n\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": region.image.id,\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "ReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class ReferCOCODataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path, dataset='refcoco', splitBy='unc'):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.refer = REFER(ann_path, vis_root, dataset, splitBy)\n self.ref_ids = self.refer.getRefIds(split=\"train\")\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.ref_ids)\n\n def preprocess(self, index):\n ref_id = self.ref_ids[index]\n ref = self.refer.loadRefs(ref_id)[0]\n\n image_file = 'COCO_train2014_{:0>12}.jpg'.format(ref[\"image_id\"])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [image.shape[1], image.shape[2]]\n\n image_new_size = [100,100]\n\n sample_sentence = random.choice(ref['sentences'])['raw']\n refer_sentence = self.text_processor(sample_sentence)\n\n\n bbox = self.refer.getRefBox(ref['ref_id'])\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": ref['image_id'],\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "InvReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class InvReferCOCODataset(ReferCOCODataset):\n def __init__(self, *args, **kwargs):\n super(InvReferCOCODataset, self).__init__(*args, **kwargs)\n\n self.instruction_pool = [\n \"[identify] {}\",\n \"[identify] what object is in this location {}\",\n \"[identify] identify the object present at this location {}\",\n \"[identify] what is it in {}\",\n \"[identify] describe this object in {}\",\n \"[identify] this {} is\",\n \"[identify] the object in {} is\",\n ]\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n\n instruction = random.choice(self.instruction_pool).format(data['bbox'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n \n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['refer_sentence']),\n \"image_id\": data['image_id'],\n }" }, { "identifier": "GQADataset", "path": "minigpt4/datasets/datasets/gqa_datasets.py", "snippet": "class GQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def __getitem__(self, index):\n ann = self.annotation[index]\n\n image_path = 
os.path.join(self.vis_root, ann[\"image\"])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n instruction = random.choice(self.instruction_pool).format(question)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n answers = self.text_processor(ann[\"answer\"])\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answers,\n }" }, { "identifier": "AOKVQADataset", "path": "minigpt4/datasets/datasets/aok_vqa_datasets.py", "snippet": "class AOKVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n answer_key = \"direct_answers\"\n\n answer_weight = {}\n for answer in ann[answer_key]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[answer_key])\n else:\n answer_weight[answer] = 1 / len(ann[answer_key])\n\n answers = list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n return {\n \"image\": image,\n \"question\": question,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n question = self.text_processor(data[\"question\"])\n instruction = random.choice(self.instruction_pool).format(question)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n answer = self.text_processor(data['answer'])\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": answer,\n }" }, { "identifier": "COCOVQADataset", "path": "minigpt4/datasets/datasets/coco_vqa_datasets.py", "snippet": "class COCOVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n question_id = ann[\"question_id\"]\n\n answer_weight = {}\n for answer in ann[\"answer\"]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[\"answer\"])\n else:\n answer_weight[answer] = 1 / len(ann[\"answer\"])\n\n answers = 
list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n\n return {\n \"image\": image,\n \"question\": question,\n \"question_id\": question_id,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n instruction = random.choice(self.instruction_pool).format(data['question'])\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"question_id\": data[\"question_id\"],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['answer']),\n }" }, { "identifier": "OCRVQADataset", "path": "minigpt4/datasets/datasets/ocrvqa_dataset.py", "snippet": "class OCRVQADataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n self.data = self.create_data(ann_path)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def create_data(self, ann_path):\n processed_data = []\n with open(ann_path, 'r') as f:\n data = json.load(f)\n for k in data.keys():\n if data[k]['split'] != 1: continue # 1 for training, 2 for validation, 3 for test\n ext = os.path.splitext(data[k]['imageURL'])[1]\n imageFile = k + ext\n assert len(data[k]['questions']) == len(data[k]['answers'])\n for q, a in zip(data[k]['questions'], data[k]['answers']):\n processed_data.append(\n {'question': q,\n 'answer': a,\n 'image_path': imageFile,\n 'image_id': k,\n 'title': data[k]['title'],\n 'genre': data[k]['genre'],\n }\n )\n return processed_data\n\n def __len__(self):\n return len(self.data)" }, { "identifier": "COCOCapDataset", "path": "minigpt4/datasets/datasets/coco_caption.py", "snippet": "class COCOCapEvalDataset(CaptionEvalDataset):\nclass NoCapsEvalDataset(CaptionEvalDataset):\nclass RefCOCOEvalData(torch.utils.data.Dataset):\nclass EvalCaptionData(torch.utils.data.Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __new__(cls, *args, **kwargs):\n def __len__(self):\n def __getitem__(self, idx):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __len__(self):\n def __getitem__(self, idx):" } ]
import os import logging import warnings from minigpt4.common.registry import registry from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder from minigpt4.datasets.datasets.laion_dataset import LaionDataset from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset from minigpt4.datasets.datasets.text_caps import TextCapDataset from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset from minigpt4.datasets.datasets.gqa_datasets import GQADataset from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset from minigpt4.datasets.datasets.coco_caption import COCOCapDataset
11,628
datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder): train_dataset_cls = ReferVisualGenomeDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/vg/ref.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info data_dir = build_info.data_dir datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], data_dir=data_dir, ) return datasets @registry.register_builder("textcaps_caption") class TextcapCaptionBuilder(BaseDatasetBuilder):
@registry.register_builder("multitask_conversation") class MultitaskConversationBuilder(BaseDatasetBuilder): train_dataset_cls = MultiTaskConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/multitask_conversation/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("unnatural_instruction") class UnnaturalInstructionBuilder(BaseDatasetBuilder): train_dataset_cls = UnnaturalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nlp/unnatural_instruction.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( text_processor=self.text_processors["train"], ann_path=build_info.ann_path, ) return datasets @registry.register_builder("llava_detail") class LlavaDetailBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/detail.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_reason") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaReasonDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/reason.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder): train_dataset_cls = ReferVisualGenomeDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/vg/ref.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info data_dir = build_info.data_dir datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], data_dir=data_dir, ) return datasets @registry.register_builder("textcaps_caption") class TextcapCaptionBuilder(BaseDatasetBuilder):
train_dataset_cls = TextCapDataset
5
2023-12-28 05:47:18+00:00
16k
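Each record in this dump repeats the same layout: a repository name, a source file path, a list of retrieved context snippets (each carrying an identifier, the path that defines it, and the snippet text), the file's import block, a token count, the code cropped just before the target line, the full file contents, the ground-truth next line, the index of the context snippet that supplies the needed definition, a timestamp, and a context-length bucket. The sketch below is one hedged way such a record could be assembled into a next-line completion example; the field names and the Record type are illustrative assumptions for this dump, not an official loader.

# Illustrative sketch with assumed field names: turn one dump record into a
# next-line completion example. The prompt is the retrieved context snippets
# plus the file's import block plus the cropped code; the target is next_line.
from typing import List, Tuple, TypedDict


class ContextSnippet(TypedDict):
    identifier: str   # e.g. "GQADataset"
    path: str         # file that defines the identifier
    snippet: str      # the definition's source text


class Record(TypedDict):
    repo_name: str
    file_path: str
    context: List[ContextSnippet]
    import_statement: str
    cropped_code: str
    next_line: str
    gold_snippet_index: int


def build_prompt(rec: Record) -> Tuple[str, str]:
    """Concatenate context, imports, and cropped code; return (prompt, target)."""
    context_block = "\n\n".join(
        f"# From {c['path']}\n{c['snippet']}" for c in rec["context"]
    )
    prompt = f"{context_block}\n\n{rec['import_statement']}\n{rec['cropped_code']}"
    return prompt, rec["next_line"]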
jiawei-ren/dreamgaussian4d
gaussian_model_4d.py
[ { "identifier": "inverse_sigmoid", "path": "utils/general_utils.py", "snippet": "def inverse_sigmoid(x):\n return torch.log(x/(1-x))" }, { "identifier": "get_expon_lr_func", "path": "utils/general_utils.py", "snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper" }, { "identifier": "build_rotation", "path": "utils/general_utils.py", "snippet": "def build_rotation(r):\n norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])\n\n q = r / norm[:, None]\n\n R = torch.zeros((q.size(0), 3, 3), device='cuda')\n\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n\n R[:, 0, 0] = 1 - 2 * (y*y + z*z)\n R[:, 0, 1] = 2 * (x*y - r*z)\n R[:, 0, 2] = 2 * (x*z + r*y)\n R[:, 1, 0] = 2 * (x*y + r*z)\n R[:, 1, 1] = 1 - 2 * (x*x + z*z)\n R[:, 1, 2] = 2 * (y*z - r*x)\n R[:, 2, 0] = 2 * (x*z - r*y)\n R[:, 2, 1] = 2 * (y*z + r*x)\n R[:, 2, 2] = 1 - 2 * (x*x + y*y)\n return R" }, { "identifier": "mkdir_p", "path": "utils/system_utils.py", "snippet": "def mkdir_p(folder_path):\n # Creates a directory. 
equivalent to using mkdir -p on the command line\n try:\n makedirs(folder_path)\n except OSError as exc: # Python >2.5\n if exc.errno == EEXIST and path.isdir(folder_path):\n pass\n else:\n raise" }, { "identifier": "RGB2SH", "path": "utils/sh_utils.py", "snippet": "def RGB2SH(rgb):\n return (rgb - 0.5) / C0" }, { "identifier": "Mesh", "path": "mesh.py", "snippet": "class Mesh:\n def __init__(\n self,\n v=None,\n f=None,\n vn=None,\n fn=None,\n vt=None,\n ft=None,\n albedo=None,\n vc=None, # vertex color\n device=None,\n ):\n self.device = device\n self.v = v\n self.vn = vn\n self.vt = vt\n self.f = f\n self.fn = fn\n self.ft = ft\n # only support a single albedo\n self.albedo = albedo\n # support vertex color is no albedo\n self.vc = vc\n\n self.ori_center = 0\n self.ori_scale = 1\n\n @classmethod\n def load(cls, path=None, resize=True, renormal=True, retex=False, front_dir='+z', **kwargs):\n # assume init with kwargs\n if path is None:\n mesh = cls(**kwargs)\n # obj supports face uv\n elif path.endswith(\".obj\"):\n mesh = cls.load_obj(path, **kwargs)\n # trimesh only supports vertex uv, but can load more formats\n else:\n mesh = cls.load_trimesh(path, **kwargs)\n\n print(f\"[Mesh loading] v: {mesh.v.shape}, f: {mesh.f.shape}\")\n # auto-normalize\n if resize:\n mesh.auto_size()\n # auto-fix normal\n if renormal or mesh.vn is None:\n mesh.auto_normal()\n print(f\"[Mesh loading] vn: {mesh.vn.shape}, fn: {mesh.fn.shape}\")\n # auto-fix texcoords\n if retex or (mesh.albedo is not None and mesh.vt is None):\n mesh.auto_uv(cache_path=path)\n print(f\"[Mesh loading] vt: {mesh.vt.shape}, ft: {mesh.ft.shape}\")\n\n # rotate front dir to +z\n if front_dir != \"+z\":\n # axis switch\n if \"-z\" in front_dir:\n T = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, -1]], device=mesh.device, dtype=torch.float32)\n elif \"+x\" in front_dir:\n T = torch.tensor([[0, 0, 1], [0, 1, 0], [1, 0, 0]], device=mesh.device, dtype=torch.float32)\n elif \"-x\" in front_dir:\n T = torch.tensor([[0, 0, -1], [0, 1, 0], [1, 0, 0]], device=mesh.device, dtype=torch.float32)\n elif \"+y\" in front_dir:\n T = torch.tensor([[1, 0, 0], [0, 0, 1], [0, 1, 0]], device=mesh.device, dtype=torch.float32)\n elif \"-y\" in front_dir:\n T = torch.tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]], device=mesh.device, dtype=torch.float32)\n else:\n T = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]], device=mesh.device, dtype=torch.float32)\n # rotation (how many 90 degrees)\n if '1' in front_dir:\n T @= torch.tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]], device=mesh.device, dtype=torch.float32) \n elif '2' in front_dir:\n T @= torch.tensor([[1, 0, 0], [0, -1, 0], [0, 0, 1]], device=mesh.device, dtype=torch.float32) \n elif '3' in front_dir:\n T @= torch.tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]], device=mesh.device, dtype=torch.float32) \n mesh.v @= T\n mesh.vn @= T\n\n return mesh\n\n # load from obj file\n @classmethod\n def load_obj(cls, path, albedo_path=None, device=None):\n assert os.path.splitext(path)[-1] == \".obj\"\n\n mesh = cls()\n\n # device\n if device is None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n mesh.device = device\n\n # load obj\n with open(path, \"r\") as f:\n lines = f.readlines()\n\n def parse_f_v(fv):\n # pass in a vertex term of a face, return {v, vt, vn} (-1 if not provided)\n # supported forms:\n # f v1 v2 v3\n # f v1/vt1 v2/vt2 v3/vt3\n # f v1/vt1/vn1 v2/vt2/vn2 v3/vt3/vn3\n # f v1//vn1 v2//vn2 v3//vn3\n xs = [int(x) - 1 if x != \"\" else -1 for x in fv.split(\"/\")]\n xs.extend([-1] * 
(3 - len(xs)))\n return xs[0], xs[1], xs[2]\n\n # NOTE: we ignore usemtl, and assume the mesh ONLY uses one material (first in mtl)\n vertices, texcoords, normals = [], [], []\n faces, tfaces, nfaces = [], [], []\n mtl_path = None\n\n for line in lines:\n split_line = line.split()\n # empty line\n if len(split_line) == 0:\n continue\n prefix = split_line[0].lower()\n # mtllib\n if prefix == \"mtllib\":\n mtl_path = split_line[1]\n # usemtl\n elif prefix == \"usemtl\":\n pass # ignored\n # v/vn/vt\n elif prefix == \"v\":\n vertices.append([float(v) for v in split_line[1:]])\n elif prefix == \"vn\":\n normals.append([float(v) for v in split_line[1:]])\n elif prefix == \"vt\":\n val = [float(v) for v in split_line[1:]]\n texcoords.append([val[0], 1.0 - val[1]])\n elif prefix == \"f\":\n vs = split_line[1:]\n nv = len(vs)\n v0, t0, n0 = parse_f_v(vs[0])\n for i in range(nv - 2): # triangulate (assume vertices are ordered)\n v1, t1, n1 = parse_f_v(vs[i + 1])\n v2, t2, n2 = parse_f_v(vs[i + 2])\n faces.append([v0, v1, v2])\n tfaces.append([t0, t1, t2])\n nfaces.append([n0, n1, n2])\n\n mesh.v = torch.tensor(vertices, dtype=torch.float32, device=device)\n mesh.vt = (\n torch.tensor(texcoords, dtype=torch.float32, device=device)\n if len(texcoords) > 0\n else None\n )\n mesh.vn = (\n torch.tensor(normals, dtype=torch.float32, device=device)\n if len(normals) > 0\n else None\n )\n\n mesh.f = torch.tensor(faces, dtype=torch.int32, device=device)\n mesh.ft = (\n torch.tensor(tfaces, dtype=torch.int32, device=device)\n if len(texcoords) > 0\n else None\n )\n mesh.fn = (\n torch.tensor(nfaces, dtype=torch.int32, device=device)\n if len(normals) > 0\n else None\n )\n\n # see if there is vertex color\n use_vertex_color = False\n if mesh.v.shape[1] == 6:\n use_vertex_color = True\n mesh.vc = mesh.v[:, 3:]\n mesh.v = mesh.v[:, :3]\n print(f\"[load_obj] use vertex color: {mesh.vc.shape}\")\n\n # try to load texture image\n if not use_vertex_color:\n # try to retrieve mtl file\n mtl_path_candidates = []\n if mtl_path is not None:\n mtl_path_candidates.append(mtl_path)\n mtl_path_candidates.append(os.path.join(os.path.dirname(path), mtl_path))\n mtl_path_candidates.append(path.replace(\".obj\", \".mtl\"))\n\n mtl_path = None\n for candidate in mtl_path_candidates:\n if os.path.exists(candidate):\n mtl_path = candidate\n break\n \n # if albedo_path is not provided, try retrieve it from mtl\n if mtl_path is not None and albedo_path is None:\n with open(mtl_path, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n split_line = line.split()\n # empty line\n if len(split_line) == 0:\n continue\n prefix = split_line[0]\n # NOTE: simply use the first map_Kd as albedo!\n if \"map_Kd\" in prefix:\n albedo_path = os.path.join(os.path.dirname(path), split_line[1])\n print(f\"[load_obj] use texture from: {albedo_path}\")\n break\n \n # still not found albedo_path, or the path doesn't exist\n if albedo_path is None or not os.path.exists(albedo_path):\n # init an empty texture\n print(f\"[load_obj] init empty albedo!\")\n # albedo = np.random.rand(1024, 1024, 3).astype(np.float32)\n albedo = np.ones((1024, 1024, 3), dtype=np.float32) * np.array([0.5, 0.5, 0.5]) # default color\n else:\n albedo = cv2.imread(albedo_path, cv2.IMREAD_UNCHANGED)\n albedo = cv2.cvtColor(albedo, cv2.COLOR_BGR2RGB)\n albedo = albedo.astype(np.float32) / 255\n print(f\"[load_obj] load texture: {albedo.shape}\")\n\n # import matplotlib.pyplot as plt\n # plt.imshow(albedo)\n # plt.show()\n\n mesh.albedo = torch.tensor(albedo, 
dtype=torch.float32, device=device)\n\n return mesh\n\n @classmethod\n def load_trimesh(cls, path, device=None):\n mesh = cls()\n\n # device\n if device is None:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n mesh.device = device\n\n # use trimesh to load ply/glb, assume only has one single RootMesh...\n _data = trimesh.load(path)\n if isinstance(_data, trimesh.Scene):\n if len(_data.geometry) == 1:\n _mesh = list(_data.geometry.values())[0]\n else:\n # manual concat, will lose texture\n _concat = []\n for g in _data.geometry.values():\n if isinstance(g, trimesh.Trimesh):\n _concat.append(g)\n _mesh = trimesh.util.concatenate(_concat)\n else:\n _mesh = _data\n \n if _mesh.visual.kind == 'vertex':\n vertex_colors = _mesh.visual.vertex_colors\n vertex_colors = np.array(vertex_colors[..., :3]).astype(np.float32) / 255\n mesh.vc = torch.tensor(vertex_colors, dtype=torch.float32, device=device)\n print(f\"[load_trimesh] use vertex color: {mesh.vc.shape}\")\n elif _mesh.visual.kind == 'texture':\n _material = _mesh.visual.material\n if isinstance(_material, trimesh.visual.material.PBRMaterial):\n texture = np.array(_material.baseColorTexture).astype(np.float32) / 255\n elif isinstance(_material, trimesh.visual.material.SimpleMaterial):\n texture = np.array(_material.to_pbr().baseColorTexture).astype(np.float32) / 255\n else:\n raise NotImplementedError(f\"material type {type(_material)} not supported!\")\n mesh.albedo = torch.tensor(texture, dtype=torch.float32, device=device)\n print(f\"[load_trimesh] load texture: {texture.shape}\")\n else:\n texture = np.ones((1024, 1024, 3), dtype=np.float32) * np.array([0.5, 0.5, 0.5])\n mesh.albedo = torch.tensor(texture, dtype=torch.float32, device=device)\n print(f\"[load_trimesh] failed to load texture.\")\n\n vertices = _mesh.vertices\n\n try:\n texcoords = _mesh.visual.uv\n texcoords[:, 1] = 1 - texcoords[:, 1]\n except Exception as e:\n texcoords = None\n\n try:\n normals = _mesh.vertex_normals\n except Exception as e:\n normals = None\n\n # trimesh only support vertex uv...\n faces = tfaces = nfaces = _mesh.faces\n\n mesh.v = torch.tensor(vertices, dtype=torch.float32, device=device)\n mesh.vt = (\n torch.tensor(texcoords, dtype=torch.float32, device=device)\n if texcoords is not None\n else None\n )\n mesh.vn = (\n torch.tensor(normals, dtype=torch.float32, device=device)\n if normals is not None\n else None\n )\n\n mesh.f = torch.tensor(faces, dtype=torch.int32, device=device)\n mesh.ft = (\n torch.tensor(tfaces, dtype=torch.int32, device=device)\n if texcoords is not None\n else None\n )\n mesh.fn = (\n torch.tensor(nfaces, dtype=torch.int32, device=device)\n if normals is not None\n else None\n )\n\n return mesh\n\n # aabb\n def aabb(self):\n return torch.min(self.v, dim=0).values, torch.max(self.v, dim=0).values\n\n # unit size\n @torch.no_grad()\n def auto_size(self):\n vmin, vmax = self.aabb()\n self.ori_center = (vmax + vmin) / 2\n self.ori_scale = 1.2 / torch.max(vmax - vmin).item()\n self.v = (self.v - self.ori_center) * self.ori_scale\n\n def auto_normal(self):\n i0, i1, i2 = self.f[:, 0].long(), self.f[:, 1].long(), self.f[:, 2].long()\n v0, v1, v2 = self.v[i0, :], self.v[i1, :], self.v[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n vn = torch.zeros_like(self.v)\n vn.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n vn.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n vn.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, 
replace zero (degenerated) normals with some default value\n vn = torch.where(\n dot(vn, vn) > 1e-20,\n vn,\n torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device),\n )\n vn = safe_normalize(vn)\n\n self.vn = vn\n self.fn = self.f\n\n def auto_uv(self, cache_path=None, vmap=True):\n # try to load cache\n if cache_path is not None:\n cache_path = os.path.splitext(cache_path)[0] + \"_uv.npz\"\n if cache_path is not None and os.path.exists(cache_path):\n data = np.load(cache_path)\n vt_np, ft_np, vmapping = data[\"vt\"], data[\"ft\"], data[\"vmapping\"]\n else:\n import xatlas\n\n v_np = self.v.detach().cpu().numpy()\n f_np = self.f.detach().int().cpu().numpy()\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n # chart_options.max_iterations = 4\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # save to cache\n if cache_path is not None:\n np.savez(cache_path, vt=vt_np, ft=ft_np, vmapping=vmapping)\n \n vt = torch.from_numpy(vt_np.astype(np.float32)).to(self.device)\n ft = torch.from_numpy(ft_np.astype(np.int32)).to(self.device)\n self.vt = vt\n self.ft = ft\n\n if vmap:\n # remap v/f to vt/ft, so each v correspond to a unique vt. (necessary for gltf)\n vmapping = torch.from_numpy(vmapping.astype(np.int64)).long().to(self.device)\n self.align_v_to_vt(vmapping)\n \n def align_v_to_vt(self, vmapping=None):\n # remap v/f and vn/vn to vt/ft.\n if vmapping is None:\n ft = self.ft.view(-1).long()\n f = self.f.view(-1).long()\n vmapping = torch.zeros(self.vt.shape[0], dtype=torch.long, device=self.device)\n vmapping[ft] = f # scatter, randomly choose one if index is not unique\n\n self.v = self.v[vmapping]\n self.f = self.ft\n # assume fn == f\n if self.vn is not None:\n self.vn = self.vn[vmapping]\n self.fn = self.ft\n\n def to(self, device):\n self.device = device\n for name in [\"v\", \"f\", \"vn\", \"fn\", \"vt\", \"ft\", \"albedo\"]:\n tensor = getattr(self, name)\n if tensor is not None:\n setattr(self, name, tensor.to(device))\n return self\n \n def write(self, path):\n if path.endswith(\".ply\"):\n self.write_ply(path)\n elif path.endswith(\".obj\"):\n self.write_obj(path)\n elif path.endswith(\".glb\") or path.endswith(\".gltf\"):\n self.write_glb(path)\n else:\n raise NotImplementedError(f\"format {path} not supported!\")\n \n # write to ply file (only geom)\n def write_ply(self, path):\n\n v_np = self.v.detach().cpu().numpy()\n f_np = self.f.detach().cpu().numpy()\n\n _mesh = trimesh.Trimesh(vertices=v_np, faces=f_np)\n _mesh.export(path)\n\n # write to gltf/glb file (geom + texture)\n def write_glb(self, path):\n\n assert self.vn is not None and self.vt is not None # should be improved to support export without texture...\n\n # assert self.v.shape[0] == self.vn.shape[0] and self.v.shape[0] == self.vt.shape[0]\n if self.v.shape[0] != self.vt.shape[0]:\n self.align_v_to_vt()\n\n # assume f == fn == ft\n\n import pygltflib\n\n f_np = self.f.detach().cpu().numpy().astype(np.uint32)\n v_np = self.v.detach().cpu().numpy().astype(np.float32)\n # vn_np = self.vn.detach().cpu().numpy().astype(np.float32)\n vt_np = self.vt.detach().cpu().numpy().astype(np.float32)\n\n albedo = self.albedo.detach().cpu().numpy()\n albedo = (albedo * 255).astype(np.uint8)\n albedo = cv2.cvtColor(albedo, cv2.COLOR_RGB2BGR)\n\n f_np_blob = f_np.flatten().tobytes()\n v_np_blob = v_np.tobytes()\n # vn_np_blob = vn_np.tobytes()\n vt_np_blob = vt_np.tobytes()\n albedo_blob = cv2.imencode('.png', 
albedo)[1].tobytes()\n\n gltf = pygltflib.GLTF2(\n scene=0,\n scenes=[pygltflib.Scene(nodes=[0])],\n nodes=[pygltflib.Node(mesh=0)],\n meshes=[pygltflib.Mesh(primitives=[\n pygltflib.Primitive(\n # indices to accessors (0 is triangles)\n attributes=pygltflib.Attributes(\n POSITION=1, TEXCOORD_0=2, \n ),\n indices=0, material=0,\n )\n ])],\n materials=[\n pygltflib.Material(\n pbrMetallicRoughness=pygltflib.PbrMetallicRoughness(\n baseColorTexture=pygltflib.TextureInfo(index=0, texCoord=0),\n metallicFactor=0.0,\n roughnessFactor=1.0,\n ),\n alphaCutoff=0,\n doubleSided=True,\n )\n ],\n textures=[\n pygltflib.Texture(sampler=0, source=0),\n ],\n samplers=[\n pygltflib.Sampler(magFilter=pygltflib.LINEAR, minFilter=pygltflib.LINEAR_MIPMAP_LINEAR, wrapS=pygltflib.REPEAT, wrapT=pygltflib.REPEAT),\n ],\n images=[\n # use embedded (buffer) image\n pygltflib.Image(bufferView=3, mimeType=\"image/png\"),\n ],\n buffers=[\n pygltflib.Buffer(byteLength=len(f_np_blob) + len(v_np_blob) + len(vt_np_blob) + len(albedo_blob))\n ],\n # buffer view (based on dtype)\n bufferViews=[\n # triangles; as flatten (element) array\n pygltflib.BufferView(\n buffer=0,\n byteLength=len(f_np_blob),\n target=pygltflib.ELEMENT_ARRAY_BUFFER, # GL_ELEMENT_ARRAY_BUFFER (34963)\n ),\n # positions; as vec3 array\n pygltflib.BufferView(\n buffer=0,\n byteOffset=len(f_np_blob),\n byteLength=len(v_np_blob),\n byteStride=12, # vec3\n target=pygltflib.ARRAY_BUFFER, # GL_ARRAY_BUFFER (34962)\n ),\n # texcoords; as vec2 array\n pygltflib.BufferView(\n buffer=0,\n byteOffset=len(f_np_blob) + len(v_np_blob),\n byteLength=len(vt_np_blob),\n byteStride=8, # vec2\n target=pygltflib.ARRAY_BUFFER,\n ),\n # texture; as none target\n pygltflib.BufferView(\n buffer=0,\n byteOffset=len(f_np_blob) + len(v_np_blob) + len(vt_np_blob),\n byteLength=len(albedo_blob),\n ),\n ],\n accessors=[\n # 0 = triangles\n pygltflib.Accessor(\n bufferView=0,\n componentType=pygltflib.UNSIGNED_INT, # GL_UNSIGNED_INT (5125)\n count=f_np.size,\n type=pygltflib.SCALAR,\n max=[int(f_np.max())],\n min=[int(f_np.min())],\n ),\n # 1 = positions\n pygltflib.Accessor(\n bufferView=1,\n componentType=pygltflib.FLOAT, # GL_FLOAT (5126)\n count=len(v_np),\n type=pygltflib.VEC3,\n max=v_np.max(axis=0).tolist(),\n min=v_np.min(axis=0).tolist(),\n ),\n # 2 = texcoords\n pygltflib.Accessor(\n bufferView=2,\n componentType=pygltflib.FLOAT,\n count=len(vt_np),\n type=pygltflib.VEC2,\n max=vt_np.max(axis=0).tolist(),\n min=vt_np.min(axis=0).tolist(),\n ),\n ],\n )\n\n # set actual data\n gltf.set_binary_blob(f_np_blob + v_np_blob + vt_np_blob + albedo_blob)\n\n # glb = b\"\".join(gltf.save_to_bytes())\n gltf.save(path)\n\n # write to obj file (geom + texture)\n def write_obj(self, path):\n\n mtl_path = path.replace(\".obj\", \".mtl\")\n albedo_path = path.replace(\".obj\", \"_albedo.png\")\n\n v_np = self.v.detach().cpu().numpy()\n vt_np = self.vt.detach().cpu().numpy() if self.vt is not None else None\n vn_np = self.vn.detach().cpu().numpy() if self.vn is not None else None\n f_np = self.f.detach().cpu().numpy()\n ft_np = self.ft.detach().cpu().numpy() if self.ft is not None else None\n fn_np = self.fn.detach().cpu().numpy() if self.fn is not None else None\n\n with open(path, \"w\") as fp:\n fp.write(f\"mtllib {os.path.basename(mtl_path)} \\n\")\n\n for v in v_np:\n fp.write(f\"v {v[0]} {v[1]} {v[2]} \\n\")\n\n if vt_np is not None:\n for v in vt_np:\n fp.write(f\"vt {v[0]} {1 - v[1]} \\n\")\n\n if vn_np is not None:\n for v in vn_np:\n fp.write(f\"vn {v[0]} {v[1]} {v[2]} 
\\n\")\n\n fp.write(f\"usemtl defaultMat \\n\")\n for i in range(len(f_np)):\n fp.write(\n f'f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1 if ft_np is not None else \"\"}/{fn_np[i, 0] + 1 if fn_np is not None else \"\"} \\\n {f_np[i, 1] + 1}/{ft_np[i, 1] + 1 if ft_np is not None else \"\"}/{fn_np[i, 1] + 1 if fn_np is not None else \"\"} \\\n {f_np[i, 2] + 1}/{ft_np[i, 2] + 1 if ft_np is not None else \"\"}/{fn_np[i, 2] + 1 if fn_np is not None else \"\"} \\n'\n )\n\n with open(mtl_path, \"w\") as fp:\n fp.write(f\"newmtl defaultMat \\n\")\n fp.write(f\"Ka 1 1 1 \\n\")\n fp.write(f\"Kd 1 1 1 \\n\")\n fp.write(f\"Ks 0 0 0 \\n\")\n fp.write(f\"Tr 1 \\n\")\n fp.write(f\"illum 1 \\n\")\n fp.write(f\"Ns 0 \\n\")\n fp.write(f\"map_Kd {os.path.basename(albedo_path)} \\n\")\n\n albedo = self.albedo.detach().cpu().numpy()\n albedo = (albedo * 255).astype(np.uint8)\n cv2.imwrite(albedo_path, cv2.cvtColor(albedo, cv2.COLOR_RGB2BGR))" }, { "identifier": "decimate_mesh", "path": "mesh_utils.py", "snippet": "def decimate_mesh(\n verts, faces, target, backend=\"pymeshlab\", remesh=False, optimalplacement=True\n):\n # optimalplacement: default is True, but for flat mesh must turn False to prevent spike artifect.\n\n _ori_vert_shape = verts.shape\n _ori_face_shape = faces.shape\n\n if backend == \"pyfqmr\":\n import pyfqmr\n\n solver = pyfqmr.Simplify()\n solver.setMesh(verts, faces)\n solver.simplify_mesh(target_count=target, preserve_border=False, verbose=False)\n verts, faces, normals = solver.getMesh()\n else:\n m = pml.Mesh(verts, faces)\n ms = pml.MeshSet()\n ms.add_mesh(m, \"mesh\") # will copy!\n\n # filters\n # ms.meshing_decimation_clustering(threshold=pml.Percentage(1))\n # ms.meshing_decimation_quadric_edge_collapse(\n # targetfacenum=int(target), optimalplacement=optimalplacement\n # )\n ms.simplification_quadric_edge_collapse_decimation(\n targetfacenum=int(target), optimalplacement=optimalplacement\n )\n\n if remesh:\n # ms.apply_coord_taubin_smoothing()\n # ms.meshing_isotropic_explicit_remeshing(\n # iterations=3, targetlen=pml.Percentage(1)\n # )\n ms.remeshing_isotropic_explicit_remeshing(\n iterations=3, \n targetlen=pml.Percentage(1)\n )\n\n # extract mesh\n m = ms.current_mesh()\n verts = m.vertex_matrix()\n faces = m.face_matrix()\n\n print(\n f\"[INFO] mesh decimation: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}\"\n )\n\n return verts, faces" }, { "identifier": "clean_mesh", "path": "mesh_utils.py", "snippet": "def clean_mesh(\n verts,\n faces,\n v_pct=1,\n min_f=64,\n min_d=20,\n repair=True,\n remesh=True,\n remesh_size=0.01,\n):\n # verts: [N, 3]\n # faces: [N, 3]\n\n _ori_vert_shape = verts.shape\n _ori_face_shape = faces.shape\n\n m = pml.Mesh(verts, faces)\n ms = pml.MeshSet()\n ms.add_mesh(m, \"mesh\") # will copy!\n\n # filters\n # ms.meshing_remove_unreferenced_vertices() # verts not refed by any faces\n ms.remove_unreferenced_vertices()\n\n if v_pct > 0:\n # ms.meshing_merge_close_vertices(\n # threshold=pml.Percentage(v_pct)\n # ) # 1/10000 of bounding box diagonal\n ms.merge_close_vertices(\n threshold=pml.Percentage(v_pct)\n )\n\n # ms.meshing_remove_duplicate_faces() # faces defined by the same verts\n ms.remove_duplicate_faces()\n # ms.meshing_remove_null_faces() # faces with area == 0\n ms.remove_zero_area_faces()\n\n if min_d > 0:\n # ms.meshing_remove_connected_component_by_diameter(\n # mincomponentdiag=pml.Percentage(min_d)\n # )\n ms.remove_isolated_pieces_wrt_diameter(\n mincomponentdiag=pml.Percentage(min_d)\n )\n\n if min_f > 0:\n # 
ms.meshing_remove_connected_component_by_face_number(mincomponentsize=min_f)\n ms.remove_isolated_pieces_wrt_face_num(mincomponentsize=min_f)\n\n if repair:\n # ms.meshing_remove_t_vertices(method=0, threshold=40, repeat=True)\n # ms.meshing_repair_non_manifold_edges(method=0)\n ms.repair_non_manifold_edges_by_removing_faces()\n # ms.meshing_repair_non_manifold_vertices(vertdispratio=0)\n ms.repair_non_manifold_vertices_by_splitting(vertdispratio=0)\n\n if remesh:\n # ms.apply_coord_taubin_smoothing()\n # ms.meshing_isotropic_explicit_remeshing(\n # iterations=3, targetlen=pml.AbsoluteValue(remesh_size)\n # )\n ms.remeshing_isotropic_explicit_remeshing(\n iterations=3, \n targetlen=pml.Percentage(1)\n )\n\n\n # extract mesh\n m = ms.current_mesh()\n verts = m.vertex_matrix()\n faces = m.face_matrix()\n\n print(\n f\"[INFO] mesh cleaning: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}\"\n )\n\n return verts, faces" }, { "identifier": "BasicPointCloud", "path": "utils/graphics_utils.py", "snippet": "class BasicPointCloud(NamedTuple):\n points : np.array\n colors : np.array\n normals : np.array" }, { "identifier": "strip_symmetric", "path": "utils/general_utils.py", "snippet": "def strip_symmetric(sym):\n return strip_lowerdiag(sym)" }, { "identifier": "build_scaling_rotation", "path": "utils/general_utils.py", "snippet": "def build_scaling_rotation(s, r):\n L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=\"cuda\")\n R = build_rotation(r)\n\n L[:,0,0] = s[:,0]\n L[:,1,1] = s[:,1]\n L[:,2,2] = s[:,2]\n\n L = R @ L\n return L" }, { "identifier": "deform_network", "path": "scene/deformation.py", "snippet": "class deform_network(nn.Module):\n def __init__(self, args) :\n super(deform_network, self).__init__()\n net_width = args.net_width\n timebase_pe = args.timebase_pe\n defor_depth= args.defor_depth\n posbase_pe= args.posebase_pe\n scale_rotation_pe = args.scale_rotation_pe\n opacity_pe = args.opacity_pe\n timenet_width = args.timenet_width\n timenet_output = args.timenet_output\n times_ch = 2*timebase_pe+1\n self.timenet = nn.Sequential(\n nn.Linear(times_ch, timenet_width), nn.ReLU(),\n nn.Linear(timenet_width, timenet_output))\n \n self.use_res = args.use_res\n if self.use_res:\n print(\"Using zero-init and residual\")\n self.deformation_net = Deformation(W=net_width, D=defor_depth, input_ch=(4+3)+((4+3)*scale_rotation_pe)*2, input_ch_time=timenet_output, args=args, use_res=self.use_res)\n self.register_buffer('time_poc', torch.FloatTensor([(2**i) for i in range(timebase_pe)]))\n self.register_buffer('pos_poc', torch.FloatTensor([(2**i) for i in range(posbase_pe)]))\n self.register_buffer('rotation_scaling_poc', torch.FloatTensor([(2**i) for i in range(scale_rotation_pe)]))\n self.register_buffer('opacity_poc', torch.FloatTensor([(2**i) for i in range(opacity_pe)]))\n self.apply(initialize_weights)\n\n if self.use_res:\n # self.deformation_net.feature_out.initialize_weights()\n self.deformation_net.pos_deform.initialize_weights()\n self.deformation_net.scales_deform.initialize_weights()\n self.deformation_net.rotations_deform.initialize_weights()\n self.deformation_net.opacity_deform.initialize_weights()\n\n # self.deformation_net.feature_out.apply(initialize_zeros_weights)\n # print(self)\n\n def forward(self, point, scales=None, rotations=None, opacity=None, times_sel=None):\n if times_sel is not None:\n return self.forward_dynamic(point, scales, rotations, opacity, times_sel)\n else:\n return self.forward_static(point)\n\n \n def 
forward_static(self, points):\n points = self.deformation_net(points)\n return points\n def forward_dynamic(self, point, scales=None, rotations=None, opacity=None, times_sel=None):\n # times_emb = poc_fre(times_sel, self.time_poc)\n\n means3D, scales, rotations, opacity = self.deformation_net( point,\n scales,\n rotations,\n opacity,\n # times_feature,\n times_sel)\n return means3D, scales, rotations, opacity\n def get_mlp_parameters(self):\n return self.deformation_net.get_mlp_parameters() + list(self.timenet.parameters())\n def get_grid_parameters(self):\n return self.deformation_net.get_grid_parameters()" }, { "identifier": "compute_plane_smoothness", "path": "scene/regulation.py", "snippet": "def compute_plane_smoothness(t):\n batch_size, c, h, w = t.shape\n # Convolve with a second derivative filter, in the time dimension which is dimension 2\n first_difference = t[..., 1:, :] - t[..., :h-1, :] # [batch, c, h-1, w]\n second_difference = first_difference[..., 1:, :] - first_difference[..., :h-2, :] # [batch, c, h-2, w]\n # Take the L2 norm of the result\n return torch.square(second_difference).mean()" } ]
import torch import numpy as np import os import mcubes import mcubes from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation from torch import nn from utils.system_utils import mkdir_p from plyfile import PlyData, PlyElement from random import randint from utils.sh_utils import RGB2SH from simple_knn._C import distCUDA2 from mesh import Mesh from mesh_utils import decimate_mesh, clean_mesh from utils.graphics_utils import BasicPointCloud from utils.general_utils import strip_symmetric, build_scaling_rotation from scene.deformation import deform_network from scene.regulation import compute_plane_smoothness
11,983
self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) @property def get_rotation(self): return self.rotation_activation(self._rotation) @property def get_xyz(self): return self._xyz @property def get_features(self): features_dc = self._features_dc features_rest = self._features_rest return torch.cat((features_dc, features_rest), dim=1) @property def get_opacity(self): return self.opacity_activation(self._opacity) @torch.no_grad() def extract_fields(self, resolution=128, num_blocks=16, relax_ratio=1.5): # resolution: resolution of field block_size = 2 / num_blocks assert resolution % block_size == 0 split_size = resolution // num_blocks opacities = self.get_opacity # pre-filter low opacity gaussians to save computation mask = (opacities > 0.005).squeeze(1) opacities = opacities[mask] xyzs = self.get_xyz[mask] stds = self.get_scaling[mask] # normalize to ~ [-1, 1] mn, mx = xyzs.amin(0), xyzs.amax(0) self.center = (mn + mx) / 2 self.scale = 1.8 / (mx - mn).amax().item() xyzs = (xyzs - self.center) * self.scale stds = stds * self.scale covs = self.covariance_activation(stds, 1, self._rotation[mask]) # tile device = opacities.device occ = torch.zeros([resolution] * 3, dtype=torch.float32, device=device) X = torch.linspace(-1, 1, resolution).split(split_size) Y = torch.linspace(-1, 1, resolution).split(split_size) Z = torch.linspace(-1, 1, resolution).split(split_size) # loop blocks (assume max size of gaussian is small than relax_ratio * block_size !!!) for xi, xs in enumerate(X): for yi, ys in enumerate(Y): for zi, zs in enumerate(Z): xx, yy, zz = torch.meshgrid(xs, ys, zs) # sample points [M, 3] pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1).to(device) # in-tile gaussians mask vmin, vmax = pts.amin(0), pts.amax(0) vmin -= block_size * relax_ratio vmax += block_size * relax_ratio mask = (xyzs < vmax).all(-1) & (xyzs > vmin).all(-1) # if hit no gaussian, continue to next block if not mask.any(): continue mask_xyzs = xyzs[mask] # [L, 3] mask_covs = covs[mask] # [L, 6] mask_opas = opacities[mask].view(1, -1) # [L, 1] --> [1, L] # query per point-gaussian pair. g_pts = pts.unsqueeze(1).repeat(1, mask_covs.shape[0], 1) - mask_xyzs.unsqueeze(0) # [M, L, 3] g_covs = mask_covs.unsqueeze(0).repeat(pts.shape[0], 1, 1) # [M, L, 6] # batch on gaussian to avoid OOM batch_g = 1024 val = 0 for start in range(0, g_covs.shape[1], batch_g): end = min(start + batch_g, g_covs.shape[1]) w = gaussian_3d_coeff(g_pts[:, start:end].reshape(-1, 3), g_covs[:, start:end].reshape(-1, 6)).reshape(pts.shape[0], -1) # [M, l] val += (mask_opas[:, start:end] * w).sum(-1) # kiui.lo(val, mask_opas, w) occ[xi * split_size: xi * split_size + len(xs), yi * split_size: yi * split_size + len(ys), zi * split_size: zi * split_size + len(zs)] = val.reshape(len(xs), len(ys), len(zs)) return occ def extract_mesh(self, path, density_thresh=1, resolution=128, decimate_target=1e5): os.makedirs(os.path.dirname(path), exist_ok=True) occ = self.extract_fields(resolution).detach().cpu().numpy() vertices, triangles = mcubes.marching_cubes(occ, density_thresh) vertices = vertices / (resolution - 1.0) * 2 - 1 # transform back to the original space vertices = vertices / self.scale + self.center.detach().cpu().numpy()
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # def gaussian_3d_coeff(xyzs, covs): # xyzs: [N, 3] # covs: [N, 6] x, y, z = xyzs[:, 0], xyzs[:, 1], xyzs[:, 2] a, b, c, d, e, f = covs[:, 0], covs[:, 1], covs[:, 2], covs[:, 3], covs[:, 4], covs[:, 5] # eps must be small enough !!! inv_det = 1 / (a * d * f + 2 * e * c * b - e**2 * a - c**2 * d - b**2 * f + 1e-24) inv_a = (d * f - e**2) * inv_det inv_b = (e * c - b * f) * inv_det inv_c = (e * b - c * d) * inv_det inv_d = (a * f - c**2) * inv_det inv_e = (b * c - e * a) * inv_det inv_f = (a * d - b**2) * inv_det power = -0.5 * (x**2 * inv_a + y**2 * inv_d + z**2 * inv_f) - x * y * inv_b - x * z * inv_c - y * z * inv_e power[power > 0] = -1e10 # abnormal values... make weights 0 return torch.exp(power) class GaussianModel: def setup_functions(self): def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): L = build_scaling_rotation(scaling_modifier * scaling, rotation) actual_covariance = L @ L.transpose(1, 2) symm = strip_symmetric(actual_covariance) return symm self.scaling_activation = torch.exp self.scaling_inverse_activation = torch.log self.covariance_activation = build_covariance_from_scaling_rotation self.opacity_activation = torch.sigmoid self.inverse_opacity_activation = inverse_sigmoid self.rotation_activation = torch.nn.functional.normalize def __init__(self, sh_degree : int, args): self.active_sh_degree = 0 self.max_sh_degree = sh_degree self._xyz = torch.empty(0) # self._deformation = torch.empty(0) self._deformation = deform_network(args) # self.grid = TriPlaneGrid() self._features_dc = torch.empty(0) self._features_rest = torch.empty(0) self._scaling = torch.empty(0) self._rotation = torch.empty(0) self._opacity = torch.empty(0) self.max_radii2D = torch.empty(0) self.xyz_gradient_accum = torch.empty(0) self.denom = torch.empty(0) self.optimizer = None self.percent_dense = 0 self.spatial_lr_scale = 0 self._deformation_table = torch.empty(0) self.setup_functions() def capture(self): return ( self.active_sh_degree, self._xyz, self._deformation.state_dict(), self._deformation_table, # self.grid, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, self.xyz_gradient_accum, self.denom, self.optimizer.state_dict(), self.spatial_lr_scale, ) def restore(self, model_args, training_args): (self.active_sh_degree, self._xyz, self._deformation_table, self._deformation, # self.grid, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) @property def get_rotation(self): return self.rotation_activation(self._rotation) @property def get_xyz(self): return self._xyz @property def get_features(self): features_dc = self._features_dc features_rest = self._features_rest return torch.cat((features_dc, features_rest), dim=1) @property def get_opacity(self): return self.opacity_activation(self._opacity) @torch.no_grad() def extract_fields(self, resolution=128, num_blocks=16, relax_ratio=1.5): # resolution: resolution of field 
block_size = 2 / num_blocks assert resolution % block_size == 0 split_size = resolution // num_blocks opacities = self.get_opacity # pre-filter low opacity gaussians to save computation mask = (opacities > 0.005).squeeze(1) opacities = opacities[mask] xyzs = self.get_xyz[mask] stds = self.get_scaling[mask] # normalize to ~ [-1, 1] mn, mx = xyzs.amin(0), xyzs.amax(0) self.center = (mn + mx) / 2 self.scale = 1.8 / (mx - mn).amax().item() xyzs = (xyzs - self.center) * self.scale stds = stds * self.scale covs = self.covariance_activation(stds, 1, self._rotation[mask]) # tile device = opacities.device occ = torch.zeros([resolution] * 3, dtype=torch.float32, device=device) X = torch.linspace(-1, 1, resolution).split(split_size) Y = torch.linspace(-1, 1, resolution).split(split_size) Z = torch.linspace(-1, 1, resolution).split(split_size) # loop blocks (assume max size of gaussian is small than relax_ratio * block_size !!!) for xi, xs in enumerate(X): for yi, ys in enumerate(Y): for zi, zs in enumerate(Z): xx, yy, zz = torch.meshgrid(xs, ys, zs) # sample points [M, 3] pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1).to(device) # in-tile gaussians mask vmin, vmax = pts.amin(0), pts.amax(0) vmin -= block_size * relax_ratio vmax += block_size * relax_ratio mask = (xyzs < vmax).all(-1) & (xyzs > vmin).all(-1) # if hit no gaussian, continue to next block if not mask.any(): continue mask_xyzs = xyzs[mask] # [L, 3] mask_covs = covs[mask] # [L, 6] mask_opas = opacities[mask].view(1, -1) # [L, 1] --> [1, L] # query per point-gaussian pair. g_pts = pts.unsqueeze(1).repeat(1, mask_covs.shape[0], 1) - mask_xyzs.unsqueeze(0) # [M, L, 3] g_covs = mask_covs.unsqueeze(0).repeat(pts.shape[0], 1, 1) # [M, L, 6] # batch on gaussian to avoid OOM batch_g = 1024 val = 0 for start in range(0, g_covs.shape[1], batch_g): end = min(start + batch_g, g_covs.shape[1]) w = gaussian_3d_coeff(g_pts[:, start:end].reshape(-1, 3), g_covs[:, start:end].reshape(-1, 6)).reshape(pts.shape[0], -1) # [M, l] val += (mask_opas[:, start:end] * w).sum(-1) # kiui.lo(val, mask_opas, w) occ[xi * split_size: xi * split_size + len(xs), yi * split_size: yi * split_size + len(ys), zi * split_size: zi * split_size + len(zs)] = val.reshape(len(xs), len(ys), len(zs)) return occ def extract_mesh(self, path, density_thresh=1, resolution=128, decimate_target=1e5): os.makedirs(os.path.dirname(path), exist_ok=True) occ = self.extract_fields(resolution).detach().cpu().numpy() vertices, triangles = mcubes.marching_cubes(occ, density_thresh) vertices = vertices / (resolution - 1.0) * 2 - 1 # transform back to the original space vertices = vertices / self.scale + self.center.detach().cpu().numpy()
vertices, triangles = clean_mesh(vertices, triangles, remesh=True, remesh_size=0.015)
7
2023-12-28 08:17:40+00:00
16k
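A quick, self-contained way to sanity-check the packed-covariance algebra in gaussian_3d_coeff above: for an isotropic Gaussian with variance sigma**2, the 6-vector (a, b, c, d, e, f) reduces to (sigma**2, 0, 0, sigma**2, 0, sigma**2), and the returned weight should equal exp(-0.5 * |x|**2 / sigma**2). The sketch below is illustrative only and not part of the repository above; isotropic_covs is a hypothetical helper, and the commented assertion assumes the gaussian_3d_coeff definition from the snippet is in scope.

import torch

def isotropic_covs(n: int, sigma: float) -> torch.Tensor:
    # packed symmetric covariance (a, b, c, d, e, f) for Sigma = sigma^2 * I,
    # in the same ordering that gaussian_3d_coeff unpacks
    var = torch.full((n,), sigma * sigma)
    zero = torch.zeros(n)
    return torch.stack([var, zero, zero, var, zero, var], dim=-1)

xyzs = torch.randn(8, 3)
covs = isotropic_covs(8, sigma=0.3)
expected = torch.exp(-0.5 * (xyzs ** 2).sum(-1) / 0.3 ** 2)
# with gaussian_3d_coeff from the snippet above in scope, the two should agree:
# assert torch.allclose(gaussian_3d_coeff(xyzs, covs), expected, atol=1e-5)

The isotropic case exercises only the diagonal terms of the inverse, which makes it a cheap correctness check before trusting the full anisotropic path.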
FoundationVision/UniRef
detectron2/data/datasets/coco.py
[ { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "PolygonMasks", "path": "detectron2/structures/masks.py", "snippet": "class PolygonMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in the form of polygons.\n\n Attributes:\n polygons: list[list[ndarray]]. 
Each ndarray is a float64 vector representing a polygon.\n \"\"\"\n\n def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):\n \"\"\"\n Arguments:\n polygons (list[list[np.ndarray]]): The first\n level of the list correspond to individual instances,\n the second level to all the polygons that compose the\n instance, and the third level to the polygon coordinates.\n The third level array should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n \"\"\"\n if not isinstance(polygons, list):\n raise ValueError(\n \"Cannot create PolygonMasks: Expect a list of list of polygons per image. \"\n \"Got '{}' instead.\".format(type(polygons))\n )\n\n def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\n # Use float64 for higher precision, because why not?\n # Always put polygons on CPU (self.to is a no-op) since they\n # are supposed to be small tensors.\n # May need to change this assumption if GPU placement becomes useful\n if isinstance(t, torch.Tensor):\n t = t.cpu().numpy()\n return np.asarray(t).astype(\"float64\")\n\n def process_polygons(\n polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]\n ) -> List[np.ndarray]:\n if not isinstance(polygons_per_instance, list):\n raise ValueError(\n \"Cannot create polygons: Expect a list of polygons per instance. \"\n \"Got '{}' instead.\".format(type(polygons_per_instance))\n )\n # transform each polygon to a numpy array\n polygons_per_instance = [_make_array(p) for p in polygons_per_instance]\n for polygon in polygons_per_instance:\n if len(polygon) % 2 != 0 or len(polygon) < 6:\n raise ValueError(f\"Cannot create a polygon from {len(polygon)} coordinates.\")\n return polygons_per_instance\n\n self.polygons: List[List[np.ndarray]] = [\n process_polygons(polygons_per_instance) for polygons_per_instance in polygons\n ]\n\n def to(self, *args: Any, **kwargs: Any) -> \"PolygonMasks\":\n return self\n\n @property\n def device(self) -> torch.device:\n return torch.device(\"cpu\")\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around polygon masks.\n \"\"\"\n boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)\n for idx, polygons_per_instance in enumerate(self.polygons):\n minxy = torch.as_tensor([float(\"inf\"), float(\"inf\")], dtype=torch.float32)\n maxxy = torch.zeros(2, dtype=torch.float32)\n for polygon in polygons_per_instance:\n coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)\n minxy = torch.min(minxy, torch.min(coords, dim=0).values)\n maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)\n boxes[idx, :2] = minxy\n boxes[idx, 2:] = maxxy\n return Boxes(boxes)\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor:\n a BoolTensor which represents whether each mask is empty (False) or not (True).\n \"\"\"\n keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]\n return torch.from_numpy(np.asarray(keep, dtype=np.bool))\n\n def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\n \"\"\"\n Support indexing over the instances and return a `PolygonMasks` object.\n `item` can be:\n\n 1. An integer. It will return an object with only one instance.\n 2. A slice. It will return an object with the selected instances.\n 3. A list[int]. It will return an object with the selected instances,\n correpsonding to the indices in the list.\n 4. 
A vector mask of type BoolTensor, whose length is num_instances.\n It will return an object with the instances whose mask is nonzero.\n \"\"\"\n if isinstance(item, int):\n selected_polygons = [self.polygons[item]]\n elif isinstance(item, slice):\n selected_polygons = self.polygons[item]\n elif isinstance(item, list):\n selected_polygons = [self.polygons[i] for i in item]\n elif isinstance(item, torch.Tensor):\n # Polygons is a list, so we have to move the indices back to CPU.\n if item.dtype == torch.bool:\n assert item.dim() == 1, item.shape\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\n elif item.dtype in [torch.int32, torch.int64]:\n item = item.cpu().numpy().tolist()\n else:\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\n selected_polygons = [self.polygons[i] for i in item]\n return PolygonMasks(selected_polygons)\n\n def __iter__(self) -> Iterator[List[np.ndarray]]:\n \"\"\"\n Yields:\n list[ndarray]: the polygons for one instance.\n Each Tensor is a float64 vector representing a polygon.\n \"\"\"\n return iter(self.polygons)\n\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.polygons))\n return s\n\n def __len__(self) -> int:\n return len(self.polygons)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each mask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor: A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n\n device = boxes.device\n # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise\n # (several small tensors for representing a single instance mask)\n boxes = boxes.to(torch.device(\"cpu\"))\n\n results = [\n rasterize_polygons_within_box(poly, box.numpy(), mask_size)\n for poly, box in zip(self.polygons, boxes)\n ]\n \"\"\"\n poly: list[list[float]], the polygons for one instance\n box: a tensor of shape (4,)\n \"\"\"\n if len(results) == 0:\n return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)\n return torch.stack(results, dim=0).to(device=device)\n\n def area(self):\n \"\"\"\n Computes area of the mask.\n Only works with Polygons, using the shoelace formula:\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n Returns:\n Tensor: a vector, area for each instance\n \"\"\"\n\n area = []\n for polygons_per_instance in self.polygons:\n area_per_instance = 0\n for p in polygons_per_instance:\n area_per_instance += polygon_area(p[0::2], p[1::2])\n area.append(area_per_instance)\n\n return torch.tensor(area)\n\n @staticmethod\n def cat(polymasks_list: List[\"PolygonMasks\"]) -> \"PolygonMasks\":\n \"\"\"\n Concatenates a list of PolygonMasks into a single PolygonMasks\n\n Arguments:\n polymasks_list (list[PolygonMasks])\n\n Returns:\n PolygonMasks: the concatenated PolygonMasks\n \"\"\"\n assert isinstance(polymasks_list, (list, tuple))\n assert len(polymasks_list) > 0\n assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)\n\n cat_polymasks = type(polymasks_list[0])(\n list(itertools.chain.from_iterable(pm.polygons for pm in 
polymasks_list))\n )\n return cat_polymasks" }, { "identifier": "RotatedBoxes", "path": "detectron2/structures/rotated_boxes.py", "snippet": "class RotatedBoxes(Boxes):\n \"\"\"\n This structure stores a list of rotated boxes as a Nx5 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx5 matrix. Each row is\n (x_center, y_center, width, height, angle),\n in which angle is represented in degrees.\n While there's no strict range restriction for it,\n the recommended principal range is between [-180, 180) degrees.\n\n Assume we have a horizontal box B = (x_center, y_center, width, height),\n where width is along the x-axis and height is along the y-axis.\n The rotated box B_rot (x_center, y_center, width, height, angle)\n can be seen as:\n\n 1. When angle == 0:\n B_rot == B\n 2. When angle > 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;\n 3. When angle < 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.\n\n Mathematically, since the right-handed coordinate system for image space\n is (y, x), where y is top->down and x is left->right, the 4 vertices of the\n rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from\n the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)\n in the following way (:math:`\\\\theta = angle*\\\\pi/180` is the angle in radians,\n :math:`(y_c, x_c)` is the center of the rectangle):\n\n .. math::\n\n yr_i = \\\\cos(\\\\theta) (y_i - y_c) - \\\\sin(\\\\theta) (x_i - x_c) + y_c,\n\n xr_i = \\\\sin(\\\\theta) (y_i - y_c) + \\\\cos(\\\\theta) (x_i - x_c) + x_c,\n\n which is the standard rigid-body rotation transformation.\n\n Intuitively, the angle is\n (1) the rotation angle from y-axis in image space\n to the height vector (top->down in the box's local coordinate system)\n of the box in CCW, and\n (2) the rotation angle from x-axis in image space\n to the width vector (left->right in the box's local coordinate system)\n of the box in CCW.\n\n More intuitively, consider the following horizontal box ABCD represented\n in (x1, y1, x2, y2): (3, 2, 7, 4),\n covering the [3, 7] x [2, 4] region of the continuous coordinate system\n which looks like this:\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | |\n | D---C\n |\n v y\n\n Note that each capital letter represents one 0-dimensional geometric point\n instead of a 'square pixel' here.\n\n In the example above, using (x, y) to represent a point we have:\n\n .. math::\n\n O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)\n\n We name vector AB = vector DC as the width vector in box's local coordinate system, and\n vector AD = vector BC as the height vector in box's local coordinate system. Initially,\n when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis\n in the image space, respectively.\n\n For better illustration, we denote the center of the box as E,\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | E |\n | D---C\n |\n v y\n\n where the center E = ((3+7)/2, (2+4)/2) = (5, 3).\n\n Also,\n\n .. 
math::\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Therefore, the corresponding representation for the same shape in rotated box in\n (x_center, y_center, width, height, angle) format is:\n\n (5, 3, 4, 2, 0),\n\n Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees\n CCW (counter-clockwise) by definition. It looks like this:\n\n .. code:: none\n\n O--------> x\n | B-C\n | | |\n | |E|\n | | |\n | A-D\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CCW with regard to E:\n A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)\n\n Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to\n vector AD or vector BC (the top->down height vector in box's local coordinate system),\n or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right\n width vector in box's local coordinate system).\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)\n by definition? It looks like this:\n\n .. code:: none\n\n O--------> x\n | D-A\n | | |\n | |E|\n | | |\n | C-B\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CW with regard to E:\n A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU\n will be 1. However, these two will generate different RoI Pooling results and\n should not be treated as an identical box.\n\n On the other hand, it's easy to see that (X, Y, W, H, A) is identical to\n (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be\n identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is\n equivalent to rotating the same shape 90 degrees CW.\n\n We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):\n\n .. code:: none\n\n O--------> x\n |\n | C---D\n | | E |\n | B---A\n |\n v y\n\n .. math::\n\n A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Finally, this is a very inaccurate (heavily quantized) illustration of\n how (5, 3, 4, 2, 60) looks like in case anyone wonders:\n\n .. 
code:: none\n\n O--------> x\n | B\\\n | / C\n | /E /\n | A /\n | `D\n v y\n\n It's still a rectangle with center of (5, 3), width of 4 and height of 2,\n but its angle (and thus orientation) is somewhere between\n (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"RotatedBoxes\":\n \"\"\"\n Clone the RotatedBoxes.\n\n Returns:\n RotatedBoxes\n \"\"\"\n return RotatedBoxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return RotatedBoxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = box[:, 2] * box[:, 3]\n return area\n\n def normalize_angles(self) -> None:\n \"\"\"\n Restrict angles to the range of [-180, 180) degrees\n \"\"\"\n self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0\n\n def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n For RRPN:\n Only clip boxes that are almost horizontal with a tolerance of\n clip_angle_threshold to maintain backward compatibility.\n\n Rotated boxes beyond this threshold are not clipped for two reasons:\n\n 1. There are potentially multiple ways to clip a rotated box to make it\n fit within the image.\n 2. It's tricky to make the entire rectangular box fit within the image\n and still be able to not leave out pixels of interest.\n\n Therefore we rely on ops like RoIAlignRotated to safely handle this.\n\n Args:\n box_size (height, width): The clipping box's size.\n clip_angle_threshold:\n Iff. 
abs(normalized(angle)) <= clip_angle_threshold (in degrees),\n we do the clipping as horizontal boxes.\n \"\"\"\n h, w = box_size\n\n # normalize angles to be within (-180, 180] degrees\n self.normalize_angles()\n\n idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]\n\n # convert to (x1, y1, x2, y2)\n x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0\n y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0\n x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0\n y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0\n\n # clip\n x1.clamp_(min=0, max=w)\n y1.clamp_(min=0, max=h)\n x2.clamp_(min=0, max=w)\n y2.clamp_(min=0, max=h)\n\n # convert back to (xc, yc, w, h)\n self.tensor[idx, 0] = (x1 + x2) / 2.0\n self.tensor[idx, 1] = (y1 + y2) / 2.0\n # make sure widths and heights do not increase due to numerical errors\n self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)\n self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor: a binary vector which represents\n whether each box is empty (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2]\n heights = box[:, 3]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"RotatedBoxes\":\n \"\"\"\n Returns:\n RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned RotatedBoxes might share storage with this RotatedBoxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return RotatedBoxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on RotatedBoxes with {} failed to return a matrix!\".format(\n item\n )\n return RotatedBoxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"RotatedBoxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box covering\n [0, width] x [0, height]\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n For RRPN, it might not be necessary to call this function since it's common\n for rotated box to extend to outside of the image boundaries\n (the clip function only clips the near-horizontal boxes)\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n\n cnt_x = self.tensor[..., 0]\n cnt_y = self.tensor[..., 1]\n half_w = self.tensor[..., 2] / 2.0\n half_h = self.tensor[..., 3] / 2.0\n a = self.tensor[..., 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n max_rect_dx = c * half_w + s * half_h\n max_rect_dy = c * half_h + s * half_w\n\n inds_inside = (\n (cnt_x - max_rect_dx >= -boundary_threshold)\n & (cnt_y - max_rect_dy >= -boundary_threshold)\n & (cnt_x + max_rect_dx < width + boundary_threshold)\n & (cnt_y + max_rect_dy < height + boundary_threshold)\n )\n\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return self.tensor[:, :2]\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the rotated box with horizontal and vertical scaling factors\n Note: when scale_factor_x != scale_factor_y,\n the rotated box does not preserve the rectangular shape when the angle\n is not a multiple of 90 degrees under resize transformation.\n Instead, the shape is a parallelogram (that has skew)\n Here we make an approximation by fitting a rotated rectangle to the parallelogram.\n \"\"\"\n self.tensor[:, 0] *= scale_x\n self.tensor[:, 1] *= scale_y\n theta = self.tensor[:, 4] * math.pi / 180.0\n c = torch.cos(theta)\n s = torch.sin(theta)\n\n # In image space, y is top->down and x is left->right\n # Consider the local coordintate system for the rotated box,\n # where the box center is located at (0, 0), and the four vertices ABCD are\n # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)\n # the midpoint of the left edge AD of the rotated box E is:\n # E = (A+D)/2 = (-w / 2, 0)\n # the midpoint of the top edge AB of the rotated box F is:\n # F(0, -h / 2)\n # To get the old coordinates in the global system, apply the rotation transformation\n # (Note: the right-handed coordinate system for image space is yOx):\n # (old_x, old_y) = (s * y + c * x, c * y - s * x)\n # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)\n # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)\n # After applying the scaling factor (sfx, sfy):\n 
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)\n # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)\n # The new width after scaling tranformation becomes:\n\n # w(new) = |E(new) - O| * 2\n # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2\n # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w\n # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y\n self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)\n\n # h(new) = |F(new) - O| * 2\n # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2\n # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h\n # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x\n self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)\n\n # The angle is the rotation angle from y-axis in image space to the height\n # vector (top->down in the box's local coordinate system) of the box in CCW.\n #\n # angle(new) = angle_yOx(O - F(new))\n # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )\n # = atan2(sfx * s * h / 2, sfy * c * h / 2)\n # = atan2(sfx * s, sfy * c)\n #\n # For example,\n # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)\n self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi\n\n @classmethod\n def cat(cls, boxes_list: List[\"RotatedBoxes\"]) -> \"RotatedBoxes\":\n \"\"\"\n Concatenates a list of RotatedBoxes into a single RotatedBoxes\n\n Arguments:\n boxes_list (list[RotatedBoxes])\n\n Returns:\n RotatedBoxes: the concatenated RotatedBoxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, RotatedBoxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (5,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "DatasetCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" } ]
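As a quick orientation for the context snippets above, here is a minimal usage sketch of the BoxMode.convert helper they define (assuming detectron2 is installed); the numbers are illustrative and simply convert a COCO-style XYWH_ABS box into XYXY_ABS corners.

from detectron2.structures import BoxMode

xywh = [10.0, 20.0, 30.0, 40.0]   # (x0, y0, w, h) as stored in COCO json
xyxy = BoxMode.convert(xywh, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
print(xyxy)                        # [10.0, 20.0, 40.0, 60.0]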
import contextlib
import datetime
import io
import json
import logging
import numpy as np
import os
import shutil
import pycocotools.mask as mask_util
import detectron2.data.datasets  # noqa # add pre-defined metadata
import sys

from fvcore.common.timer import Timer
from iopath.common.file_io import file_lock
from PIL import Image

from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
from detectron2.utils.file_io import PathManager
from .. import DatasetCatalog, MetadataCatalog
from pycocotools.coco import COCO
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
13,194
# the category ids to contiguous ids in [0, 80). # It works by looking at the "categories" field in the json, therefore # if users' own json also have incontiguous ids, we'll # apply this mapping as well but print a warning. if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): if "coco" not in dataset_name: logger.warning( """ Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. """ ) id_map = {v: i for i, v in enumerate(cat_ids)} meta.thing_dataset_id_to_contiguous_id = id_map # sort indices for reproducible results img_ids = sorted(coco_api.imgs.keys()) # imgs is a list of dicts, each looks something like: # {'license': 4, # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', # 'file_name': 'COCO_val2014_000000001268.jpg', # 'height': 427, # 'width': 640, # 'date_captured': '2013-11-17 05:57:24', # 'id': 1268} imgs = coco_api.loadImgs(img_ids) # anns is a list[list[dict]], where each dict is an annotation # record for an object. The inner list enumerates the objects in an image # and the outer list enumerates over images. Example of anns[0]: # [{'segmentation': [[192.81, # 247.09, # ... # 219.03, # 249.06]], # 'area': 1035.749, # 'iscrowd': 0, # 'image_id': 1268, # 'bbox': [192.81, 224.8, 74.73, 33.43], # 'category_id': 16, # 'id': 42986}, # ...] anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] total_num_valid_anns = sum([len(x) for x in anns]) total_num_anns = len(coco_api.anns) if total_num_valid_anns < total_num_anns: logger.warning( f"{json_file} contains {total_num_anns} annotations, but only " f"{total_num_valid_anns} of them match to images in the file." ) if "minival" not in json_file: # The popular valminusminival & minival annotations for COCO2014 contain this bug. # However the ratio of buggy annotations there is tiny and does not affect accuracy. # Therefore we explicitly white-list them. ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( json_file ) imgs_anns = list(zip(imgs, anns)) logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) dataset_dicts = [] ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) num_instances_without_valid_segmentation = 0 for (img_dict, anno_dict_list) in imgs_anns: record = {} record["file_name"] = os.path.join(image_root, img_dict["file_name"]) record["height"] = img_dict["height"] record["width"] = img_dict["width"] image_id = record["image_id"] = img_dict["id"] objs = [] for anno in anno_dict_list: # Check that the image_id in this annotation is the same as # the image_id we're looking at. # This fails only when the data parsing logic or the annotation file is buggy. # The original COCO valminusminival2014 & minival2014 annotation files # actually contains bugs that, together with certain ways of using COCO API, # can trigger this assertion. assert anno["image_id"] == image_id assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' obj = {key: anno[key] for key in ann_keys if key in anno} if "bbox" in obj and len(obj["bbox"]) == 0: raise ValueError( f"One annotation of image {image_id} contains empty 'bbox' value! " "This json does not have valid COCO format." 
) segm = anno.get("segmentation", None) if segm: # either list[list[float]] or dict(RLE) if isinstance(segm, dict): if isinstance(segm["counts"], list): # convert to compressed RLE segm = mask_util.frPyObjects(segm, *segm["size"]) else: # filter out invalid polygons (< 3 points) segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] if len(segm) == 0: num_instances_without_valid_segmentation += 1 continue # ignore this instance obj["segmentation"] = segm keypts = anno.get("keypoints", None) if keypts: # list[int] for idx, v in enumerate(keypts): if idx % 3 != 2: # COCO's segmentation coordinates are floating points in [0, H or W], # but keypoint coordinates are integers in [0, H-1 or W-1] # Therefore we assume the coordinates are "pixel indices" and # add 0.5 to convert to floating point coordinates. keypts[idx] = v + 0.5 obj["keypoints"] = keypts
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". """ logger = logging.getLogger(__name__) __all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"] def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None, dataset_name_in_dict="coco"): """ Load a json file with COCO's instances annotation format. Currently supports instance detection, instance segmentation, and person keypoints annotations. Args: json_file (str): full path to the json file in COCO instances annotation format. image_root (str or path-like): the directory where the images in this json file exists. dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). When provided, this function will also do the following: * Put "thing_classes" into the metadata associated with this dataset. * Map the category ids into a contiguous range (needed by standard dataset format), and add "thing_dataset_id_to_contiguous_id" to the metadata associated with this dataset. This option should usually be provided, unless users need to load the original json content and apply more processing manually. extra_annotation_keys (list[str]): list of per-annotation keys that should also be loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", "category_id", "segmentation"). The values for these keys will be returned as-is. For example, the densepose annotations are loaded in this way. Returns: list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None. If `dataset_name` is None, the returned `category_ids` may be incontiguous and may not conform to the Detectron2 standard format. Notes: 1. This function does not read the image files. The results do not have the "image" field. """ timer = Timer() json_file = PathManager.get_local_path(json_file) with contextlib.redirect_stdout(io.StringIO()): coco_api = COCO(json_file) if timer.seconds() > 1: logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) id_map = None if dataset_name is not None: meta = MetadataCatalog.get(dataset_name) cat_ids = sorted(coco_api.getCatIds()) cats = coco_api.loadCats(cat_ids) # The categories in a custom json file may not be sorted. thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] meta.thing_classes = thing_classes # In COCO, certain category ids are artificially removed, # and by convention they are always ignored. # We deal with COCO's id issue and translate # the category ids to contiguous ids in [0, 80). # It works by looking at the "categories" field in the json, therefore # if users' own json also have incontiguous ids, we'll # apply this mapping as well but print a warning. if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): if "coco" not in dataset_name: logger.warning( """ Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. 
""" ) id_map = {v: i for i, v in enumerate(cat_ids)} meta.thing_dataset_id_to_contiguous_id = id_map # sort indices for reproducible results img_ids = sorted(coco_api.imgs.keys()) # imgs is a list of dicts, each looks something like: # {'license': 4, # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', # 'file_name': 'COCO_val2014_000000001268.jpg', # 'height': 427, # 'width': 640, # 'date_captured': '2013-11-17 05:57:24', # 'id': 1268} imgs = coco_api.loadImgs(img_ids) # anns is a list[list[dict]], where each dict is an annotation # record for an object. The inner list enumerates the objects in an image # and the outer list enumerates over images. Example of anns[0]: # [{'segmentation': [[192.81, # 247.09, # ... # 219.03, # 249.06]], # 'area': 1035.749, # 'iscrowd': 0, # 'image_id': 1268, # 'bbox': [192.81, 224.8, 74.73, 33.43], # 'category_id': 16, # 'id': 42986}, # ...] anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] total_num_valid_anns = sum([len(x) for x in anns]) total_num_anns = len(coco_api.anns) if total_num_valid_anns < total_num_anns: logger.warning( f"{json_file} contains {total_num_anns} annotations, but only " f"{total_num_valid_anns} of them match to images in the file." ) if "minival" not in json_file: # The popular valminusminival & minival annotations for COCO2014 contain this bug. # However the ratio of buggy annotations there is tiny and does not affect accuracy. # Therefore we explicitly white-list them. ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( json_file ) imgs_anns = list(zip(imgs, anns)) logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) dataset_dicts = [] ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) num_instances_without_valid_segmentation = 0 for (img_dict, anno_dict_list) in imgs_anns: record = {} record["file_name"] = os.path.join(image_root, img_dict["file_name"]) record["height"] = img_dict["height"] record["width"] = img_dict["width"] image_id = record["image_id"] = img_dict["id"] objs = [] for anno in anno_dict_list: # Check that the image_id in this annotation is the same as # the image_id we're looking at. # This fails only when the data parsing logic or the annotation file is buggy. # The original COCO valminusminival2014 & minival2014 annotation files # actually contains bugs that, together with certain ways of using COCO API, # can trigger this assertion. assert anno["image_id"] == image_id assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' obj = {key: anno[key] for key in ann_keys if key in anno} if "bbox" in obj and len(obj["bbox"]) == 0: raise ValueError( f"One annotation of image {image_id} contains empty 'bbox' value! " "This json does not have valid COCO format." 
) segm = anno.get("segmentation", None) if segm: # either list[list[float]] or dict(RLE) if isinstance(segm, dict): if isinstance(segm["counts"], list): # convert to compressed RLE segm = mask_util.frPyObjects(segm, *segm["size"]) else: # filter out invalid polygons (< 3 points) segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] if len(segm) == 0: num_instances_without_valid_segmentation += 1 continue # ignore this instance obj["segmentation"] = segm keypts = anno.get("keypoints", None) if keypts: # list[int] for idx, v in enumerate(keypts): if idx % 3 != 2: # COCO's segmentation coordinates are floating points in [0, H or W], # but keypoint coordinates are integers in [0, H-1 or W-1] # Therefore we assume the coordinates are "pixel indices" and # add 0.5 to convert to floating point coordinates. keypts[idx] = v + 0.5 obj["keypoints"] = keypts
obj["bbox_mode"] = BoxMode.XYWH_ABS
1
2023-12-22 13:31:33+00:00
16k
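To show how load_coco_json above is usually wired into the DatasetCatalog/MetadataCatalog machinery from the context snippets, here is a minimal sketch; the dataset name and paths are placeholders, and detectron2's register_coco_instances wraps essentially this pattern.

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import load_coco_json

name = "my_coco_val"                              # placeholder dataset name
json_file = "datasets/my_coco/annotations.json"   # placeholder path
image_root = "datasets/my_coco/images"            # placeholder path

# register a lazy loader; DatasetCatalog.get(name) calls it on demand
DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
MetadataCatalog.get(name).set(json_file=json_file, image_root=image_root)

dicts = DatasetCatalog.get(name)                  # list[dict] in Detectron2's standard dataset format
print(len(dicts), dicts[0]["file_name"])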
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n # improve the resolution of DMTET at these steps\n progressive_resolution_steps: Optional[int] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if 
self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n self.cached_sdf = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # adjust the position of mesh\n if \"full_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.3\n elif \"half_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.1\n elif \"head_only\" in mesh_path:\n mesh.vertices[:,2] = mesh.vertices[:,2] + 0.15\n elif \"t-pose\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.4\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = 
np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(2000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((40000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n\n sdf_loss: Optional[Float[Tensor, \"*N 1\"]] = None\n if self.cfg.use_sdf_loss and self.cached_sdf is not None:\n selected_points_idx = torch.LongTensor(random.sample(range(points_unscaled.shape[0]), 100000))\n gt_sdf = torch.from_numpy(-self.cached_sdf(points_unscaled[selected_points_idx].cpu().numpy())).to(\n points_unscaled\n )[..., None]\n sdf_loss = F.mse_loss(gt_sdf, sdf[selected_points_idx], reduction='sum')\n return sdf, deformation, sdf_loss\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n\n if global_step >= 
(self.cfg.start_sdf_loss_step + 1) and self.cached_sdf is None:\n\n from pysdf import SDF\n import trimesh\n\n mesh_v_pos = np.load('.threestudio_cache/mesh_v_pos.npy')\n mesh_t_pos_idx = np.load('.threestudio_cache/mesh_t_pos_idx.npy')\n cached_mesh = trimesh.Trimesh(\n vertices=mesh_v_pos,\n faces=mesh_t_pos_idx,\n )\n self.cached_sdf = SDF(cached_mesh.vertices, cached_mesh.faces)\n\n if self.cfg.progressive_resolution_steps is not None:\n if global_step >= self.cfg.progressive_resolution_steps[0] and self.cfg.isosurface_resolution < 256:\n self.cfg.isosurface_resolution = 256\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n if global_step >= self.cfg.progressive_resolution_steps[1] and self.cfg.isosurface_resolution < 512:\n self.cfg.isosurface_resolution = 512\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n 
self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n 
[4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = 
edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n 
components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # 
Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n \n setattr(co, 'max_cost', 2.0)\n setattr(po, 'resolution', 4096)\n \n atlas.generate(co, po, verbose=True)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = 
ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
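The ImplicitSDF and ImplicitVolume snippets in the context list above both estimate normals by finite differences over ±eps axis offsets (the "finite_difference_laplacian" branch is a central difference). A minimal standalone sketch of that computation, assuming only a generic sdf_fn callable; the function name and the sphere test are illustrative, not from the repository:

import torch
import torch.nn.functional as F

def finite_difference_normal(sdf_fn, points: torch.Tensor, eps: float = 1e-3) -> torch.Tensor:
    # sdf_fn maps (M, 3) points to (M, 1) signed distances; returns (N, 3) unit normals.
    offsets = torch.tensor(
        [[eps, 0.0, 0.0], [-eps, 0.0, 0.0],
         [0.0, eps, 0.0], [0.0, -eps, 0.0],
         [0.0, 0.0, eps], [0.0, 0.0, -eps]],
        dtype=points.dtype, device=points.device,
    )                                                    # (6, 3)
    probes = points[:, None, :] + offsets                # (N, 6, 3)
    sdf = sdf_fn(probes.reshape(-1, 3)).reshape(points.shape[0], 6)
    grad = (sdf[:, 0::2] - sdf[:, 1::2]) / (2.0 * eps)   # central difference per axis
    return F.normalize(grad, dim=-1)

# sanity check on a unit-sphere SDF: normals should point radially outward
sphere_sdf = lambda p: p.norm(dim=-1, keepdim=True) - 1.0
pts = F.normalize(torch.randn(8, 3), dim=-1) * 1.2
assert torch.allclose(finite_difference_normal(sphere_sdf, pts), F.normalize(pts, dim=-1), atol=1e-3)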
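In the MarchingTetrahedraHelper snippet, surface vertices are placed on tetrahedron edges whose endpoints have opposite occupancy via the sign-flip / torch.flip trick on edges_to_interp_sdf; that is linear interpolation to the SDF zero crossing. The same formula written out explicitly, as a sketch (the function name and the tiny test are illustrative):

import torch

def edge_zero_crossing(p0, p1, s0, s1):
    # p0, p1: (E, 3) edge endpoints; s0, s1: (E, 1) SDF values with opposite signs.
    # Matches verts = (edges_to_interp * flip([s0, -s1]) / (s0 - s1)).sum(1),
    # i.e. weights (-s1, s0) / (s0 - s1) on (p0, p1).
    denom = s0 - s1          # nonzero whenever the signs differ
    return (-s1 / denom) * p0 + (s0 / denom) * p1

p0 = torch.tensor([[0.0, 0.0, 0.0]])
p1 = torch.tensor([[1.0, 0.0, 0.0]])
print(edge_zero_crossing(p0, p1, torch.tensor([[0.25]]), torch.tensor([[-0.75]])))  # [[0.25, 0., 0.]]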
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF from tqdm import tqdm import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh
14,299
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
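In configure() above, fix_geometry switches the grid sdf and deformation between trainable nn.Parameters and frozen buffers; both end up in the state_dict, but only parameters are returned by .parameters() and receive gradients. A minimal sketch of that pattern in isolation (the toy module is illustrative, not the actual class):

import torch
import torch.nn as nn

class GridField(nn.Module):
    def __init__(self, n_vertices: int, fix_geometry: bool):
        super().__init__()
        values = torch.zeros(n_vertices, 1)
        if fix_geometry:
            self.register_buffer("sdf", values)                   # saved, but not optimized
        else:
            self.register_parameter("sdf", nn.Parameter(values))  # trainable

print(len(list(GridField(8, True).parameters())),   # 0
      len(list(GridField(8, False).parameters())))  # 1
print("sdf" in GridField(8, True).state_dict())     # True either way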
self.mesh: Optional[Mesh] = None
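The gold next_line above completes configure() by initializing the cached self.mesh attribute. Putting the visible pieces of this record together: the class holds a per-vertex sdf (and optional deformation) over the tet grid, and the MarchingTetrahedraHelper entry in the context exposes forward(level, deformation) -> Mesh. A hedged sketch of how those pieces compose (the record does not show the class's own isosurface method, so extract_mesh below is purely illustrative):

def extract_mesh(geometry):
    # geometry is assumed to be a configured TetrahedraSDFGrid-like module with
    # .sdf, .deformation, .cfg.isosurface_deformable_grid and .isosurface_helper.
    deformation = geometry.deformation if geometry.cfg.isosurface_deformable_grid else None
    mesh = geometry.isosurface_helper(geometry.sdf, deformation)
    # mesh.v_pos lives in the helper's normalized grid range; the full file presumably
    # maps it back into isosurface_bbox with scale_tensor (imported above).
    return mesh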
6
2023-12-23 12:37:48+00:00
16k
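Each record in this dump pairs a repository file with retrieved cross-file context snippets, the file's import block, the in-file prefix (cropped_code / all_code), the gold next_line, the index of the context snippet containing it (gold_snippet_index), and a context-length bucket (level, 16k here). A minimal sketch of consuming one record, assuming the dataset is materialized as JSON lines with exactly these field names (the path records.jsonl and the prompt layout are illustrative assumptions):

import json

def build_prompt(record: dict):
    # Layout assumption: cross-file snippets first, then imports and the in-file prefix;
    # the gold next_line is the completion target.
    snippets = "\n\n".join(entry["snippet"] for entry in record["context"])
    prompt = f'{snippets}\n\n{record["import_statement"]}\n{record["cropped_code"]}\n'
    return prompt, record["next_line"]

with open("records.jsonl") as f:                      # illustrative path
    record = json.loads(f.readline())
prompt, target = build_prompt(record)
print(record["repo_name"], record["file_path"], record["level"], record["gold_snippet_index"])
print("target:", target)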
Con6924/SPM
evaluate_task.py
[ { "identifier": "config", "path": "src/configs/config.py", "snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel):\nclass LoggingConfig(BaseModel):\nclass InferenceConfig(BaseModel):\nclass OtherConfig(BaseModel):\nclass RootConfig(BaseModel):\ndef parse_precision(precision: str) -> torch.dtype:\ndef load_config_from_yaml(config_path: str) -> RootConfig:" }, { "identifier": "RootConfig", "path": "src/configs/config.py", "snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None" }, { "identifier": "GenerationConfig", "path": "src/configs/generation_config.py", "snippet": "class GenerationConfig(BaseModel):\n prompts: list[str] = []\n negative_prompt: str = \"bad anatomy,watermark,extra digit,signature,worst quality,jpeg artifacts,normal quality,low quality,long neck,lowres,error,blurry,missing fingers,fewer digits,missing arms,text,cropped,Humpbacked,bad hands,username\"\n unconditional_prompt: str = \"\"\n width: int = 512\n height: int = 512\n num_inference_steps: int = 30\n guidance_scale: float = 7.5\n seed: int = 2024\n generate_num: int = 1\n\n save_path: str = None # can be a template, e.g. \"path/to/img_{}.png\",\n # then the generated images will be saved as \"path/to/img_0.png\", \"path/to/img_1.png\", ...\n\n def dict(self):\n results = {}\n for attr in vars(self):\n if not attr.startswith(\"_\"):\n results[attr] = getattr(self, attr)\n return results\n \n @staticmethod\n def fix_format(cfg):\n for k, v in cfg.items():\n if isinstance(v, list):\n cfg[k] = v[0]\n elif isinstance(v, torch.Tensor):\n cfg[k] = v.item()" }, { "identifier": "train_util", "path": "src/engine/train_util.py", "snippet": "UNET_IN_CHANNELS = 4 # Stable Diffusion の in_channels は 4 で固定。XLも同じ。\nVAE_SCALE_FACTOR = 8 # 2 ** (len(vae.config.block_out_channels) - 1) = 8\nUNET_ATTENTION_TIME_EMBED_DIM = 256 # XL\nTEXT_ENCODER_2_PROJECTION_DIM = 1280\nUNET_PROJECTION_CLASS_EMBEDDING_INPUT_DIM = 2816\ndef get_random_noise(\n batch_size: int, height: int, width: int, generator: torch.Generator = None\n) -> torch.Tensor:\ndef apply_noise_offset(latents: torch.FloatTensor, noise_offset: float):\ndef get_initial_latents(\n scheduler: SchedulerMixin,\n n_imgs: int,\n height: int,\n width: int,\n n_prompts: int,\n generator=None,\n) -> torch.Tensor:\ndef text_tokenize(\n tokenizer: CLIPTokenizer, # 普通ならひとつ、XLならふたつ!\n prompts: list[str],\n):\ndef text_encode(text_encoder: CLIPTextModel, tokens):\ndef encode_prompts(\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTokenizer,\n prompts: list[str],\n return_tokens: bool = False,\n):\ndef text_encode_xl(\n text_encoder: SDXL_TEXT_ENCODER_TYPE,\n tokens: torch.FloatTensor,\n num_images_per_prompt: int = 1,\n):\ndef encode_prompts_xl(\n tokenizers: list[CLIPTokenizer],\n text_encoders: list[SDXL_TEXT_ENCODER_TYPE],\n prompts: list[str],\n num_images_per_prompt: int = 1,\n) -> tuple[torch.FloatTensor, torch.FloatTensor]:\ndef concat_embeddings(\n unconditional: torch.FloatTensor,\n conditional: torch.FloatTensor,\n n_imgs: int,\n):\ndef predict_noise(\n unet: 
UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n guidance_scale=7.5,\n) -> torch.FloatTensor:\ndef diffusion(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: torch.FloatTensor,\n total_timesteps: int = 1000,\n start_timesteps=0,\n **kwargs,\n):\ndef rescale_noise_cfg(\n noise_cfg: torch.FloatTensor, noise_pred_text, guidance_rescale=0.0\n):\ndef predict_noise_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale=7.5,\n guidance_rescale=0.7,\n) -> torch.FloatTensor:\ndef diffusion_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: tuple[torch.FloatTensor, torch.FloatTensor],\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale: float = 1.0,\n total_timesteps: int = 1000,\n start_timesteps=0,\n):\ndef get_add_time_ids(\n height: int,\n width: int,\n dynamic_crops: bool = False,\n dtype: torch.dtype = torch.float32,\n):\ndef get_optimizer(config, trainable_params):\ndef get_scheduler_fix(config, optimizer: Optimizer, num_processes: int = 1):\n def wrap_check_needless_num_warmup_steps(return_vals):\ndef get_random_resolution_in_bucket(bucket_resolution: int = 512) -> tuple[int, int]:\ndef text2img(pipe: DiffusionPipeline,\n prompts: Union[str, list[str]], \n negative_prompt: Union[str, list[str]] = \"\", \n width: int = 512, \n height: int = 512,\n num_inference_steps: int = 30,\n guidance_scale: int = 7.5,\n seed: int = None,\n generate_num: int = 1,\n tag: str = \"\",\n **kwargs):\ndef latent2img(pipe: DiffusionPipeline,\n scheduler,\n noise_pred: torch.FloatTensor,\n latents: torch.FloatTensor,\n timestep: int,\n tag: str = \"ori\",\n **kwargs):" }, { "identifier": "model_util", "path": "src/models/model_util.py", "snippet": "TOKENIZER_V1_MODEL_NAME = \"CompVis/stable-diffusion-v1-4\"\nTOKENIZER_V2_MODEL_NAME = \"stabilityai/stable-diffusion-2-1\"\nAVAILABLE_SCHEDULERS = Literal[\"ddim\", \"ddpm\", \"lms\", \"euler_a\"]\nSDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]\nDIFFUSERS_CACHE_DIR = \".cache/\" # if you want to change the cache dir, change this\nLOCAL_ONLY = False # if you want to use only local files, change this\ndef load_diffusers_model(\n pretrained_model_name_or_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:\ndef load_checkpoint_model(\n checkpoint_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, DiffusionPipeline]:\ndef load_models(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n v2: bool = False,\n v_pred: bool = False,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin, DiffusionPipeline, ]:\ndef load_diffusers_model_xl(\n pretrained_model_name_or_path: 
str,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:\ndef load_checkpoint_model_xl(\n checkpoint_path: str,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel, DiffusionPipeline, ]:\ndef load_models_xl(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[\ndef create_noise_scheduler(\n scheduler_name: AVAILABLE_SCHEDULERS = \"ddpm\",\n prediction_type: Literal[\"epsilon\", \"v_prediction\"] = \"epsilon\",\n) -> SchedulerMixin:" }, { "identifier": "SPMLayer", "path": "src/models/spm.py", "snippet": "class SPMLayer(nn.Module):\n \"\"\"\n replaces forward method of the original Linear, instead of replacing the original Linear module.\n \"\"\"\n\n def __init__(\n self,\n spm_name,\n org_module: nn.Module,\n multiplier=1.0,\n dim=4,\n alpha=1,\n ):\n \"\"\"if alpha == 0 or None, alpha is rank (no scaling).\"\"\"\n super().__init__()\n self.spm_name = spm_name\n self.dim = dim\n\n if org_module.__class__.__name__ == \"Linear\":\n in_dim = org_module.in_features\n out_dim = org_module.out_features\n self.lora_down = nn.Linear(in_dim, dim, bias=False)\n self.lora_up = nn.Linear(dim, out_dim, bias=False)\n\n elif org_module.__class__.__name__ == \"Conv2d\":\n in_dim = org_module.in_channels\n out_dim = org_module.out_channels\n\n self.dim = min(self.dim, in_dim, out_dim)\n if self.dim != dim:\n print(f\"{spm_name} dim (rank) is changed to: {self.dim}\")\n\n kernel_size = org_module.kernel_size\n stride = org_module.stride\n padding = org_module.padding\n self.lora_down = nn.Conv2d(\n in_dim, self.dim, kernel_size, stride, padding, bias=False\n )\n self.lora_up = nn.Conv2d(self.dim, out_dim, (1, 1), (1, 1), bias=False)\n\n if type(alpha) == torch.Tensor:\n alpha = alpha.detach().numpy()\n alpha = dim if alpha is None or alpha == 0 else alpha\n self.scale = alpha / self.dim\n self.register_buffer(\"alpha\", torch.tensor(alpha))\n\n # same as microsoft's\n nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_up.weight)\n\n self.multiplier = multiplier\n self.org_module = org_module # remove in applying\n\n def apply_to(self):\n self.org_forward = self.org_module.forward\n self.org_module.forward = self.forward\n del self.org_module\n\n def forward(self, x):\n return (\n self.org_forward(x)\n + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale\n )" }, { "identifier": "SPMNetwork", "path": "src/models/spm.py", "snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2D\",\n ]\n\n SPM_PREFIX_UNET = \"lora_unet\" # aligning with SD webui usage\n DEFAULT_TARGET_REPLACE = UNET_TARGET_REPLACE_MODULE_TRANSFORMER\n\n def __init__(\n self,\n unet: UNet2DConditionModel,\n rank: int = 4,\n multiplier: float = 1.0,\n alpha: float = 1.0,\n module = SPMLayer,\n module_kwargs = None,\n ) -> None:\n super().__init__()\n\n self.multiplier = multiplier\n self.dim = rank\n self.alpha = alpha\n\n self.module = module\n self.module_kwargs = module_kwargs or {}\n\n # unet spm\n self.unet_spm_layers = self.create_modules(\n SPMNetwork.SPM_PREFIX_UNET,\n unet,\n SPMNetwork.DEFAULT_TARGET_REPLACE,\n self.dim,\n self.multiplier,\n )\n print(f\"Create SPM for U-Net: 
{len(self.unet_spm_layers)} modules.\")\n\n spm_names = set()\n for spm_layer in self.unet_spm_layers:\n assert (\n spm_layer.spm_name not in spm_names\n ), f\"duplicated SPM layer name: {spm_layer.spm_name}. {spm_names}\"\n spm_names.add(spm_layer.spm_name)\n\n for spm_layer in self.unet_spm_layers:\n spm_layer.apply_to()\n self.add_module(\n spm_layer.spm_name,\n spm_layer,\n )\n\n del unet\n\n torch.cuda.empty_cache()\n\n def create_modules(\n self,\n prefix: str,\n root_module: nn.Module,\n target_replace_modules: List[str],\n rank: int,\n multiplier: float,\n ) -> list:\n spm_layers = []\n\n for name, module in root_module.named_modules():\n if module.__class__.__name__ in target_replace_modules:\n for child_name, child_module in module.named_modules():\n if child_module.__class__.__name__ in [\"Linear\", \"Conv2d\"]:\n spm_name = prefix + \".\" + name + \".\" + child_name\n spm_name = spm_name.replace(\".\", \"_\")\n print(f\"{spm_name}\")\n spm_layer = self.module(\n spm_name, child_module, multiplier, rank, self.alpha, **self.module_kwargs\n )\n spm_layers.append(spm_layer)\n\n return spm_layers\n\n def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):\n all_params = []\n\n if self.unet_spm_layers:\n params = []\n [params.extend(spm_layer.parameters()) for spm_layer in self.unet_spm_layers]\n param_data = {\"params\": params}\n if default_lr is not None:\n param_data[\"lr\"] = default_lr\n all_params.append(param_data)\n\n return all_params\n\n def save_weights(self, file, dtype=None, metadata: Optional[dict] = None):\n state_dict = self.state_dict()\n\n if dtype is not None:\n for key in list(state_dict.keys()):\n v = state_dict[key]\n v = v.detach().clone().to(\"cpu\").to(dtype)\n state_dict[key] = v\n\n for key in list(state_dict.keys()):\n if not key.startswith(\"lora\"):\n del state_dict[key]\n\n if os.path.splitext(file)[1] == \".safetensors\":\n save_file(state_dict, file, metadata)\n else:\n torch.save(state_dict, file)\n\n def __enter__(self):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 1.0\n\n def __exit__(self, exc_type, exc_value, tb):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 0" }, { "identifier": "load_state_dict", "path": "src/models/merge_spm.py", "snippet": "def load_state_dict(file_name, dtype):\n if os.path.splitext(file_name)[1] == \".safetensors\":\n sd = load_file(file_name)\n metadata = load_metadata_from_safetensors(file_name)\n else:\n sd = torch.load(file_name, map_location=\"cpu\")\n metadata = {}\n\n for key in list(sd.keys()):\n if type(sd[key]) == torch.Tensor:\n sd[key] = sd[key].to(dtype)\n\n return sd, metadata" }, { "identifier": "SLDPipeline", "path": "src/misc/sld_pipeline.py", "snippet": "class SLDPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline for text-to-image generation using Safe Latent Diffusion.\n\n The implementation is based on the [`StableDiffusionPipeline`]\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. 
Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPFeatureExtractor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n ],\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPFeatureExtractor,\n ):\n super().__init__()\n safety_concept: Optional[str] = 'hate, harassment, violence, suffering, humiliation, harm, suicide, ' \\\n 'sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, ' \\\n 'drug use, theft, vandalism, weapons, child abuse, brutality, cruelty'\n\n if hasattr(scheduler.config, \"steps_offset\") and scheduler.config.steps_offset != 1:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`\"\n f\" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure \"\n \"to update the config accordingly as leaving `steps_offset` might led to incorrect results\"\n \" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,\"\n \" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`\"\n \" file\"\n )\n deprecate(\"steps_offset!=1\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"steps_offset\"] = 1\n scheduler._internal_dict = FrozenDict(new_config)\n\n if hasattr(scheduler.config, \"clip_sample\") and scheduler.config.clip_sample is True:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`.\"\n \" `clip_sample` should be set to False in the configuration file. Please make sure to update the\"\n \" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in\"\n \" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very\"\n \" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file\"\n )\n deprecate(\"clip_sample not set\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"clip_sample\"] = False\n scheduler._internal_dict = FrozenDict(new_config)\n\n if safety_checker is None:\n logger.warn(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self._safety_text_concept = safety_concept\n\n @property\n def safety_concept(self):\n r\"\"\"\n Getter method for the safety concept used with SLD\n\n Returns:\n `str`:\n The text describing the safety concept\n \"\"\"\n return self._safety_text_concept\n\n @safety_concept.setter\n def safety_concept(self, concept):\n r\"\"\"\n Setter method for the safety concept used with SLD\n\n Args:\n concept (`str`):\n The text of the new safety concept\n \"\"\"\n self._safety_text_concept = concept\n\n def enable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Enable memory efficient attention as implemented in xformers.\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference\n time. Speed up at training time is not guaranteed.\n\n Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention\n is used.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(True)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention as implemented in xformers.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(False)\n\n def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = \"auto\"):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,\n `attention_head_dim` must be a multiple of `slice_size`.\n \"\"\"\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = self.unet.config.attention_head_dim // 2\n self.unet.set_attention_slice(slice_size)\n\n def disable_attention_slicing(self):\n r\"\"\"\n Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go\n back to computing attention in one step.\n \"\"\"\n # set slice_size = `None` to disable `attention slicing`\n self.enable_attention_slicing(None)\n\n def enable_sequential_cpu_offload(self):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet,\n text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(\"cuda\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n height: int = 512,\n width: int = 512,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[torch.Generator] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n sld_guidance_scale: Optional[float] = 1000,\n sld_warmup_steps: Optional[int] = 10,\n sld_threshold: Optional[float] = 0.01,\n sld_momentum_scale: Optional[float] = 0.3,\n sld_mom_beta: Optional[float] = 0.4,\n **kwargs,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n height (`int`, *optional*, defaults to 512):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to 512):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator`, *optional*):\n A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation\n deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. 
Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n sld_guidance_scale (`float`, *optional*, defaults to 1000):\n The guidance scale of safe latent diffusion. If set to be less than 1, safety guidance will be disabled.\n sld_warmup_steps (`int`, *optional*, defaults to 10):\n Number of warmup steps for safety guidance. SLD will only be applied for diffusion steps greater\n than `sld_warmup_steps`.\n sld_threshold (`float`, *optional*, defaults to 0.01):\n Threshold that separates the hyperplane between appropriate and inappropriate images.\n sld_momentum_scale (`float`, *optional*, defaults to 0.3):\n Scale of the SLD momentum to be added to the safety guidance at each diffusion step.\n If set to 0.0 momentum will be disabled. Momentum is already built up during warmup,\n i.e. for diffusion steps smaller than `sld_warmup_steps`.\n sld_mom_beta (`float`, *optional*, defaults to 0.4):\n Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous\n momentum will be kept. Momentum is already built up during warmup, i.e. for diffusion steps smaller than\n `sld_warmup_steps`.\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n if isinstance(prompt, str):\n batch_size = 1\n elif isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n enable_safety_guidance = True\n if sld_guidance_scale < 1:\n enable_safety_guidance = False\n logger.warn('You have disabled safety guidance.')\n\n # get prompt text embeddings\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n\n if text_input_ids.shape[-1] > self.tokenizer.model_max_length:\n removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" 
{self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]\n text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)\n text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = text_input_ids.shape[-1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)\n uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # Encode the safety concept text\n if enable_safety_guidance:\n safety_concept_input = self.tokenizer(\n [self._safety_text_concept],\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0]\n\n # duplicate safety embeddings for each generation per prompt, using mps friendly method\n seq_len = safety_embeddings.shape[1]\n safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1)\n safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings, safety_embeddings])\n\n else:\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n # get the initial random noise unless the user supplied it\n\n # Unlike in other pipelines, latents need to be generated in the target device\n # for 1-to-1 results reproducibility with the CompVis implementation.\n # However this currently doesn't work in `mps`.\n latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)\n latents_dtype = 
text_embeddings.dtype\n if latents is None:\n if self.device.type == \"mps\":\n # randn does not work reproducibly on mps\n latents = torch.randn(latents_shape, generator=generator, device=\"cpu\", dtype=latents_dtype).to(\n self.device\n )\n else:\n latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)\n else:\n if latents.shape != latents_shape:\n raise ValueError(f\"Unexpected latents shape, got {latents.shape}, expected {latents_shape}\")\n latents = latents.to(self.device)\n\n # set timesteps\n self.scheduler.set_timesteps(num_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # Some schedulers like PNDM have timesteps as arrays\n # It's more optimized to move all timesteps to correct device beforehand\n timesteps_tensor = self.scheduler.timesteps.to(self.device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n\n safety_momentum = None\n\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * (3 if enable_safety_guidance else 2)) \\\n if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2))\n noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]\n\n # default classifier free guidance\n noise_guidance = (noise_pred_text - noise_pred_uncond)\n\n # Perform SLD guidance\n if enable_safety_guidance:\n if safety_momentum is None:\n safety_momentum = torch.zeros_like(noise_guidance)\n noise_pred_safety_concept = noise_pred_out[2]\n\n # Equation 6\n scale = torch.clamp(\n torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.)\n\n # Equation 6\n safety_concept_scale = torch.where(\n (noise_pred_text - noise_pred_safety_concept) >= sld_threshold,\n torch.zeros_like(scale), scale)\n\n # Equation 4\n noise_guidance_safety = torch.mul(\n (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)\n\n # Equation 7\n noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum\n\n # Equation 8\n safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety\n\n if i >= sld_warmup_steps: # Warmup\n # Equation 3\n noise_guidance = noise_guidance - noise_guidance_safety\n\n noise_pred = noise_pred_uncond + guidance_scale * 
noise_guidance\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents).sample\n\n image = (image / 2 + 0.5).clamp(0, 1)\n\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n\n if self.safety_checker is not None:\n safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors=\"pt\").to(\n self.device\n )\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)\n )\n else:\n has_nsfw_concept = None\n\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return SLDPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept,\n applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None)" } ]
import argparse import gc import warnings import torch from pathlib import Path from typing import Literal from torch.utils.data import DataLoader from accelerate import PartialState, Accelerator from src.configs import config from src.configs.config import RootConfig from src.configs.generation_config import GenerationConfig from src.engine import train_util from src.evaluation import * from src.models import model_util from src.models.spm import SPMLayer, SPMNetwork from src.models.merge_spm import load_state_dict from src.misc.sld_pipeline import SLDPipeline
11,942
dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[ load_state_dict(spm_model_path, weight_dtype) for spm_model_path in spm_model_paths ] ) # check if SPMs are compatible assert all([metadata["rank"] == metadatas[0]["rank"] for metadata in metadatas]) # get the erased concept erased_prompts = [md["prompts"].split(",") for md in metadatas] erased_prompts_count = [len(ep) for ep in erased_prompts] print(f"Erased prompts: {erased_prompts}") erased_prompts_flatten = [item for sublist in erased_prompts for item in sublist] erased_prompt_embeds, erased_prompt_tokens = train_util.encode_prompts( tokenizer, text_encoder, erased_prompts_flatten, return_tokens=True ) # create the SPM network
DIFFUSERS_CACHE_DIR = ".cache/" UNET_NAME = "unet" TEXT_ENCODER_NAME = "text_encoder" MATCHING_METRICS = Literal[ "clipcos", "clipcos_tokenuni", "tokenuni", "allone", ] distributed_state = PartialState() accelerator = Accelerator() def flush(): torch.cuda.empty_cache() gc.collect() def parse_extra_args(extra_args): if extra_args is None or extra_args == ['']: return {} extra_args_dict = {} for extra_arg in extra_args: key, value = extra_arg.split("=") # convert value to various types if value.isdigit(): value = int(value) elif value.replace(".", "", 1).isdigit(): value = float(value) elif value[0] == "[" and value[-1] == "]": value = [i.replace('+', ' ') for i in value[1:-1].split(",")] value = [v.strip() for v in value] if value[0].isdigit(): value = [int(v) for v in value] elif value[0].replace(".", "", 1).isdigit(): value = [float(v) for v in value] extra_args_dict[key] = value return extra_args_dict def get_dataloader(args, num_processes=1): # parse task_args arguments task_args = parse_extra_args(args.task_args) task_args["save_folder"] = args.img_save_path task_args["output_path"] = args.save_path # parse generation arguments cfg = parse_extra_args(args.generation_cfg) cfg = GenerationConfig(**cfg) dataset_class = None if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path 
is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[ load_state_dict(spm_model_path, weight_dtype) for spm_model_path in spm_model_paths ] ) # check if SPMs are compatible assert all([metadata["rank"] == metadatas[0]["rank"] for metadata in metadatas]) # get the erased concept erased_prompts = [md["prompts"].split(",") for md in metadatas] erased_prompts_count = [len(ep) for ep in erased_prompts] print(f"Erased prompts: {erased_prompts}") erased_prompts_flatten = [item for sublist in erased_prompts for item in sublist] erased_prompt_embeds, erased_prompt_tokens = train_util.encode_prompts( tokenizer, text_encoder, erased_prompts_flatten, return_tokens=True ) # create the SPM network
network = SPMNetwork(
6
2023-12-26 03:19:16+00:00
16k
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n init: str = \"default\",\n init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n init:\n The initializer to use. Choose from:\n\n \"default\": LeCun fan-in truncated normal initialization\n \"relu\": He initialization w/ truncated normal distribution\n \"glorot\": Fan-average Glorot uniform initialization\n \"gating\": Weights=0, Bias=1\n \"normal\": Normal initialization with std=1/sqrt(fan_in)\n \"final\": Weights=0, Bias=0\n\n Overridden by init_fn if the latter is not None.\n init_fn:\n A custom initializer taking weight and bias as inputs.\n Overrides init if not None.\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)\n\n with torch.no_grad():\n if init_fn is not None:\n init_fn(self.weight, self.bias)\n else:\n if init == \"default\":\n lecun_normal_init_(self.weight)\n elif init == \"relu\":\n he_normal_init_(self.weight)\n elif init == \"glorot\":\n glorot_uniform_init_(self.weight)\n elif init == \"gating\":\n gating_init_(self.weight)\n if bias:\n self.bias.fill_(1.0)\n elif init == \"normal\":\n normal_init_(self.weight)\n elif init == \"final\":\n final_init_(self.weight)\n else:\n raise ValueError(\"Invalid init string.\")" }, { "identifier": "LayerNorm", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n d = x.dtype\n # deepspeed_is_initialized = (\n # deepspeed_is_installed and \n # deepspeed.utils.is_initialized()\n # )\n # if(d is torch.bfloat16 and not deepspeed_is_initialized):\n # with torch.cuda.amp.autocast(enabled=False):\n # out = nn.functional.layer_norm(\n # x, \n # self.c_in, \n # self.weight.to(dtype=d), \n # self.bias.to(dtype=d), \n # self.eps\n # )\n # else:\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "ipa_point_weights_init_", "path": "frame2seq/openfold/model/primitives.py", "snippet": "def ipa_point_weights_init_(weights):\n with torch.no_grad():\n softplus_inverse_1 = 0.541324854612918\n weights.fill_(softplus_inverse_1)" }, { "identifier": "restype_rigid_group_default_frame", "path": "frame2seq/openfold/np/residue_constants.py", "snippet": "def load_stereo_chemical_props() -> Tuple[\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\ndef _make_rigid_group_constants():\ndef make_atom14_dists_bounds(\n overlap_tolerance=1.5, 
bond_length_tolerance_factor=15\n):\ndef _make_atom14_ambiguity_feats():\ndef aatype_to_str_sequence(aatype):\nHHBLITS_AA_TO_ID = {\n \"A\": 0,\n \"B\": 2,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"J\": 20,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"O\": 20,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"U\": 1,\n \"V\": 17,\n \"W\": 18,\n \"X\": 20,\n \"Y\": 19,\n \"Z\": 3,\n \"-\": 21,\n}\nID_TO_HHBLITS_AA = {\n 0: \"A\",\n 1: \"C\", # Also U.\n 2: \"D\", # Also B.\n 3: \"E\", # Also Z.\n 4: \"F\",\n 5: \"G\",\n 6: \"H\",\n 7: \"I\",\n 8: \"K\",\n 9: \"L\",\n 10: \"M\",\n 11: \"N\",\n 12: \"P\",\n 13: \"Q\",\n 14: \"R\",\n 15: \"S\",\n 16: \"T\",\n 17: \"V\",\n 18: \"W\",\n 19: \"Y\",\n 20: \"X\", # Includes J and O.\n 21: \"-\",\n}\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])\n for i in range(len(restypes_with_x_and_gap))\n)\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()" }, { "identifier": "frames_and_literature_positions_to_atom14_pos", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def frames_and_literature_positions_to_atom14_pos(\n r: Rigid,\n aatype: torch.Tensor,\n default_frames,\n group_idx,\n atom_mask,\n lit_positions,\n):\n # [*, N, 14, 4, 4]\n default_4x4 = default_frames[aatype, ...]\n\n # [*, N, 14]\n group_mask = group_idx[aatype, ...]\n\n # [*, N, 14, 8]\n group_mask = nn.functional.one_hot(\n group_mask,\n num_classes=default_frames.shape[-3],\n )\n\n # [*, N, 14, 8]\n t_atoms_to_global = r[..., None, :] * group_mask\n\n # [*, N, 14]\n t_atoms_to_global = t_atoms_to_global.map_tensor_fn(\n lambda x: torch.sum(x, dim=-1)\n )\n\n # [*, N, 14, 1]\n atom_mask = atom_mask[aatype, ...].unsqueeze(-1)\n\n # [*, N, 14, 3]\n lit_positions = lit_positions[aatype, ...]\n pred_positions = t_atoms_to_global.apply(lit_positions)\n pred_positions = pred_positions * atom_mask\n\n return pred_positions" }, { "identifier": "torsion_angles_to_frames", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def torsion_angles_to_frames(\n r: Rigid,\n alpha: torch.Tensor,\n aatype: torch.Tensor,\n rrgdf: torch.Tensor,\n):\n # [*, N, 8, 4, 4]\n default_4x4 = rrgdf[aatype, ...]\n\n # [*, N, 8] transformations, i.e.\n # One [*, N, 8, 3, 3] rotation matrix and\n # One [*, N, 8, 3] translation matrix\n default_r = r.from_tensor_4x4(default_4x4)\n\n bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))\n bb_rot[..., 1] = 1\n\n # [*, N, 8, 2]\n alpha = torch.cat(\n [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2\n )\n\n # [*, N, 8, 3, 3]\n # Produces rotation matrices of the form:\n # [\n # [1, 0 , 0 ],\n # [0, a_2,-a_1],\n # [0, a_1, a_2]\n # ]\n # This follows the original code rather than the supplement, which uses\n # different indices.\n\n all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)\n all_rots[..., 0, 0] = 1\n all_rots[..., 1, 1] = alpha[..., 1]\n all_rots[..., 1, 2] = -alpha[..., 0]\n all_rots[..., 2, 1:] = alpha\n\n all_rots = Rigid(Rotation(rot_mats=all_rots), None)\n\n all_frames = default_r.compose(all_rots)\n\n chi2_frame_to_frame = all_frames[..., 5]\n chi3_frame_to_frame = all_frames[..., 6]\n chi4_frame_to_frame = all_frames[..., 7]\n\n chi1_frame_to_bb = all_frames[..., 4]\n chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)\n chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)\n chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)\n\n all_frames_to_bb = 
Rigid.cat(\n [\n all_frames[..., :5],\n chi2_frame_to_bb.unsqueeze(-1),\n chi3_frame_to_bb.unsqueeze(-1),\n chi4_frame_to_bb.unsqueeze(-1),\n ],\n dim=-1,\n )\n\n all_frames_to_global = r[..., None].compose(all_frames_to_bb)\n\n return all_frames_to_global" }, { "identifier": "is_fp16_enabled", "path": "frame2seq/openfold/utils/precision_utils.py", "snippet": "def is_fp16_enabled():\n # Autocast world\n try:\n fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16\n fp16_enabled = fp16_enabled and torch.is_autocast_enabled()\n except AttributeError:\n fp16_enabled = False\n\n return fp16_enabled" }, { "identifier": "Rotation", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". 
Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. 
If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An 
updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. 
to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")" }, { "identifier": "Rigid", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. 
from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" } ]
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
12,511
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden
self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu")
0
2023-12-25 09:29:36+00:00
16k
KyanChen/TTP
mmseg/models/decode_heads/san_head.py
[ { "identifier": "TransformerEncoderLayer", "path": "mmseg/models/backbones/vit.py", "snippet": "class TransformerEncoderLayer(BaseModule):\n \"\"\"Implements one encoder layer in Vision Transformer.\n\n Args:\n embed_dims (int): The feature dimension.\n num_heads (int): Parallel attention heads.\n feedforward_channels (int): The hidden dimension for FFNs.\n drop_rate (float): Probability of an element to be zeroed\n after the feed forward layer. Default: 0.0.\n attn_drop_rate (float): The drop out rate for attention layer.\n Default: 0.0.\n drop_path_rate (float): stochastic depth rate. Default 0.0.\n num_fcs (int): The number of fully-connected layers for FFNs.\n Default: 2.\n qkv_bias (bool): enable bias for qkv if True. Default: True\n act_cfg (dict): The activation config for FFNs.\n Default: dict(type='GELU').\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='LN').\n batch_first (bool): Key, Query and Value are shape of\n (batch, n, embed_dim)\n or (n, batch, embed_dim). Default: True.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save\n some memory while slowing down the training speed. Default: False.\n \"\"\"\n\n def __init__(self,\n embed_dims,\n num_heads,\n feedforward_channels,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.,\n num_fcs=2,\n qkv_bias=True,\n act_cfg=dict(type='GELU'),\n norm_cfg=dict(type='LN'),\n batch_first=True,\n attn_cfg=dict(),\n ffn_cfg=dict(),\n with_cp=False):\n super().__init__()\n\n self.norm1_name, norm1 = build_norm_layer(\n norm_cfg, embed_dims, postfix=1)\n self.add_module(self.norm1_name, norm1)\n\n attn_cfg.update(\n dict(\n embed_dims=embed_dims,\n num_heads=num_heads,\n attn_drop=attn_drop_rate,\n proj_drop=drop_rate,\n batch_first=batch_first,\n bias=qkv_bias))\n\n self.build_attn(attn_cfg)\n\n self.norm2_name, norm2 = build_norm_layer(\n norm_cfg, embed_dims, postfix=2)\n self.add_module(self.norm2_name, norm2)\n\n ffn_cfg.update(\n dict(\n embed_dims=embed_dims,\n feedforward_channels=feedforward_channels,\n num_fcs=num_fcs,\n ffn_drop=drop_rate,\n dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate)\n if drop_path_rate > 0 else None,\n act_cfg=act_cfg))\n self.build_ffn(ffn_cfg)\n self.with_cp = with_cp\n\n def build_attn(self, attn_cfg):\n self.attn = MultiheadAttention(**attn_cfg)\n\n def build_ffn(self, ffn_cfg):\n self.ffn = FFN(**ffn_cfg)\n\n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n return getattr(self, self.norm2_name)\n\n def forward(self, x):\n\n def _inner_forward(x):\n x = self.attn(self.norm1(x), identity=x)\n x = self.ffn(self.norm2(x), identity=x)\n return x\n\n if self.with_cp and x.requires_grad:\n x = cp.checkpoint(_inner_forward, x)\n else:\n x = _inner_forward(x)\n return x" }, { "identifier": "MODELS", "path": "mmseg/registry/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmseg.models'])" }, { "identifier": "ConfigType", "path": "mmseg/utils/typing_utils.py", "snippet": "" }, { "identifier": "MatchMasks", "path": "mmseg/utils/mask_classification.py", "snippet": "class MatchMasks:\n \"\"\"Match the predictions to category labels.\n\n Args:\n num_points (int): the number of sampled points to compute cost.\n num_queries (int): the number of prediction masks.\n num_classes (int): the number of classes.\n assigner (BaseAssigner): the assigner to compute matching.\n \"\"\"\n\n def __init__(self,\n num_points: int,\n num_queries: int,\n num_classes: int,\n 
assigner: ConfigType = None):\n assert assigner is not None, \"\\'assigner\\' in decode_head.train_cfg\" \\\n 'cannot be None'\n assert num_points > 0, 'num_points should be a positive integer.'\n self.num_points = num_points\n self.num_queries = num_queries\n self.num_classes = num_classes\n self.assigner = TASK_UTILS.build(assigner)\n\n def get_targets(self, cls_scores: List[Tensor], mask_preds: List[Tensor],\n batch_gt_instances: List[InstanceData]) -> Tuple:\n \"\"\"Compute best mask matches for all images for a decoder layer.\n\n Args:\n cls_scores (List[Tensor]): Mask score logits from a single\n decoder layer for all images. Each with shape (num_queries,\n cls_out_channels).\n mask_preds (List[Tensor]): Mask logits from a single decoder\n layer for all images. Each with shape (num_queries, h, w).\n batch_gt_instances (List[InstanceData]): each contains\n ``labels`` and ``masks``.\n\n Returns:\n tuple: a tuple containing the following targets.\n\n - labels (List[Tensor]): Labels of all images.\\\n Each with shape (num_queries, ).\n - mask_targets (List[Tensor]): Mask targets of\\\n all images. Each with shape (num_queries, h, w).\n - mask_weights (List[Tensor]): Mask weights of\\\n all images. Each with shape (num_queries, ).\n - avg_factor (int): Average factor that is used to\n average the loss. `avg_factor` is usually equal\n to the number of positive priors.\n \"\"\"\n batch_size = cls_scores.shape[0]\n results = dict({\n 'labels': [],\n 'mask_targets': [],\n 'mask_weights': [],\n })\n for i in range(batch_size):\n labels, mask_targets, mask_weights\\\n = self._get_targets_single(cls_scores[i],\n mask_preds[i],\n batch_gt_instances[i])\n results['labels'].append(labels)\n results['mask_targets'].append(mask_targets)\n results['mask_weights'].append(mask_weights)\n\n # shape (batch_size, num_queries)\n labels = torch.stack(results['labels'], dim=0)\n # shape (batch_size, num_gts, h, w)\n mask_targets = torch.cat(results['mask_targets'], dim=0)\n # shape (batch_size, num_queries)\n mask_weights = torch.stack(results['mask_weights'], dim=0)\n\n avg_factor = sum(\n [len(gt_instances.labels) for gt_instances in batch_gt_instances])\n\n res = (labels, mask_targets, mask_weights, avg_factor)\n\n return res\n\n def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor,\n gt_instances: InstanceData) \\\n -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Compute a set of best mask matches for one image.\n\n Args:\n cls_score (Tensor): Mask score logits from a single decoder layer\n for one image. Shape (num_queries, cls_out_channels).\n mask_pred (Tensor): Mask logits for a single decoder layer for one\n image. Shape (num_queries, h, w).\n gt_instances (:obj:`InstanceData`): It contains ``labels`` and\n ``masks``.\n\n Returns:\n tuple[Tensor]: A tuple containing the following for one image.\n\n - labels (Tensor): Labels of each image. \\\n shape (num_queries, ).\n - mask_targets (Tensor): Mask targets of each image. \\\n shape (num_queries, h, w).\n - mask_weights (Tensor): Mask weights of each image. 
\\\n shape (num_queries, ).\n \"\"\"\n gt_labels = gt_instances.labels\n gt_masks = gt_instances.masks\n # when \"gt_labels\" is empty, classify all queries to background\n if len(gt_labels) == 0:\n labels = gt_labels.new_full((self.num_queries, ),\n self.num_classes,\n dtype=torch.long)\n mask_targets = gt_labels\n mask_weights = gt_labels.new_zeros((self.num_queries, ))\n return labels, mask_targets, mask_weights\n # sample points\n num_queries = cls_score.shape[0]\n num_gts = gt_labels.shape[0]\n\n point_coords = torch.rand((1, self.num_points, 2),\n device=cls_score.device)\n # shape (num_queries, num_points)\n mask_points_pred = point_sample(\n mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1,\n 1)).squeeze(1)\n # shape (num_gts, num_points)\n gt_points_masks = point_sample(\n gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1,\n 1)).squeeze(1)\n\n sampled_gt_instances = InstanceData(\n labels=gt_labels, masks=gt_points_masks)\n sampled_pred_instances = InstanceData(\n scores=cls_score, masks=mask_points_pred)\n # assign and sample\n matched_quiery_inds, matched_label_inds = self.assigner.assign(\n pred_instances=sampled_pred_instances,\n gt_instances=sampled_gt_instances)\n labels = gt_labels.new_full((self.num_queries, ),\n self.num_classes,\n dtype=torch.long)\n labels[matched_quiery_inds] = gt_labels[matched_label_inds]\n\n mask_weights = gt_labels.new_zeros((self.num_queries, ))\n mask_weights[matched_quiery_inds] = 1\n mask_targets = gt_masks[matched_label_inds]\n\n return labels, mask_targets, mask_weights" }, { "identifier": "seg_data_to_instance_data", "path": "mmseg/utils/mask_classification.py", "snippet": "def seg_data_to_instance_data(ignore_index: int,\n batch_data_samples: SampleList):\n \"\"\"Convert the paradigm of ground truth from semantic segmentation to\n instance segmentation.\n\n Args:\n ignore_index (int): The label index to be ignored.\n batch_data_samples (List[SegDataSample]): The Data\n Samples. It usually includes information such as\n `gt_sem_seg`.\n\n Returns:\n tuple[Tensor]: A tuple contains two lists.\n - batch_gt_instances (List[InstanceData]): Batch of\n gt_instance. It usually includes ``labels``, each is\n unique ground truth label id of images, with\n shape (num_gt, ) and ``masks``, each is ground truth\n masks of each instances of a image, shape (num_gt, h, w).\n - batch_img_metas (List[Dict]): List of image meta information.\n \"\"\"\n batch_gt_instances = []\n\n for data_sample in batch_data_samples:\n gt_sem_seg = data_sample.gt_sem_seg.data\n classes = torch.unique(\n gt_sem_seg,\n sorted=False,\n return_inverse=False,\n return_counts=False)\n\n # remove ignored region\n gt_labels = classes[classes != ignore_index]\n\n masks = []\n for class_id in gt_labels:\n masks.append(gt_sem_seg == class_id)\n\n if len(masks) == 0:\n gt_masks = torch.zeros(\n (0, gt_sem_seg.shape[-2],\n gt_sem_seg.shape[-1])).to(gt_sem_seg).long()\n else:\n gt_masks = torch.stack(masks).squeeze(1).long()\n\n instance_data = InstanceData(labels=gt_labels, masks=gt_masks)\n batch_gt_instances.append(instance_data)\n return batch_gt_instances" }, { "identifier": "PatchEmbed", "path": "mmseg/models/utils/embed.py", "snippet": "class PatchEmbed(BaseModule):\n \"\"\"Image to Patch Embedding.\n\n We use a conv layer to implement PatchEmbed.\n\n Args:\n in_channels (int): The num of input channels. Default: 3\n embed_dims (int): The dimensions of embedding. Default: 768\n conv_type (str): The config dict for embedding\n conv layer type selection. 
Default: \"Conv2d\".\n kernel_size (int): The kernel_size of embedding conv. Default: 16.\n stride (int, optional): The slide stride of embedding conv.\n Default: None (Would be set as `kernel_size`).\n padding (int | tuple | string ): The padding length of\n embedding conv. When it is a string, it means the mode\n of adaptive padding, support \"same\" and \"corner\" now.\n Default: \"corner\".\n dilation (int): The dilation rate of embedding conv. Default: 1.\n bias (bool): Bias of embed conv. Default: True.\n norm_cfg (dict, optional): Config dict for normalization layer.\n Default: None.\n input_size (int | tuple | None): The size of input, which will be\n used to calculate the out size. Only work when `dynamic_size`\n is False. Default: None.\n init_cfg (`mmengine.ConfigDict`, optional): The Config for\n initialization. Default: None.\n \"\"\"\n\n def __init__(self,\n in_channels=3,\n embed_dims=768,\n conv_type='Conv2d',\n kernel_size=16,\n stride=None,\n padding='corner',\n dilation=1,\n bias=True,\n norm_cfg=None,\n input_size=None,\n init_cfg=None):\n super().__init__(init_cfg=init_cfg)\n\n self.embed_dims = embed_dims\n if stride is None:\n stride = kernel_size\n\n kernel_size = to_2tuple(kernel_size)\n stride = to_2tuple(stride)\n dilation = to_2tuple(dilation)\n\n if isinstance(padding, str):\n self.adap_padding = AdaptivePadding(\n kernel_size=kernel_size,\n stride=stride,\n dilation=dilation,\n padding=padding)\n # disable the padding of conv\n padding = 0\n else:\n self.adap_padding = None\n padding = to_2tuple(padding)\n\n self.projection = build_conv_layer(\n dict(type=conv_type),\n in_channels=in_channels,\n out_channels=embed_dims,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=bias)\n\n if norm_cfg is not None:\n self.norm = build_norm_layer(norm_cfg, embed_dims)[1]\n else:\n self.norm = None\n\n if input_size:\n input_size = to_2tuple(input_size)\n # `init_out_size` would be used outside to\n # calculate the num_patches\n # when `use_abs_pos_embed` outside\n self.init_input_size = input_size\n if self.adap_padding:\n pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)\n input_h, input_w = input_size\n input_h = input_h + pad_h\n input_w = input_w + pad_w\n input_size = (input_h, input_w)\n\n # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\n h_out = (input_size[0] + 2 * padding[0] - dilation[0] *\n (kernel_size[0] - 1) - 1) // stride[0] + 1\n w_out = (input_size[1] + 2 * padding[1] - dilation[1] *\n (kernel_size[1] - 1) - 1) // stride[1] + 1\n self.init_out_size = (h_out, w_out)\n else:\n self.init_input_size = None\n self.init_out_size = None\n\n def forward(self, x):\n \"\"\"\n Args:\n x (Tensor): Has shape (B, C, H, W). 
In most case, C is 3.\n\n Returns:\n tuple: Contains merged results and its spatial shape.\n\n - x (Tensor): Has shape (B, out_h * out_w, embed_dims)\n - out_size (tuple[int]): Spatial shape of x, arrange as\n (out_h, out_w).\n \"\"\"\n\n if self.adap_padding:\n x = self.adap_padding(x)\n\n x = self.projection(x)\n out_size = (x.shape[2], x.shape[3])\n x = x.flatten(2).transpose(1, 2)\n if self.norm is not None:\n x = self.norm(x)\n return x, out_size" }, { "identifier": "get_uncertain_point_coords_with_randomness", "path": "mmseg/models/utils/point_sample.py", "snippet": "def get_uncertain_point_coords_with_randomness(\n mask_preds: Tensor, labels: Tensor, num_points: int,\n oversample_ratio: float, importance_sample_ratio: float) -> Tensor:\n \"\"\"Get ``num_points`` most uncertain points with random points during\n train.\n\n Sample points in [0, 1] x [0, 1] coordinate space based on their\n uncertainty. The uncertainties are calculated for each point using\n 'get_uncertainty()' function that takes point's logit prediction as\n input.\n\n Args:\n mask_preds (Tensor): A tensor of shape (num_rois, num_classes,\n mask_height, mask_width) for class-specific or class-agnostic\n prediction.\n labels (Tensor): The ground truth class for each instance.\n num_points (int): The number of points to sample.\n oversample_ratio (float): Oversampling parameter.\n importance_sample_ratio (float): Ratio of points that are sampled\n via importnace sampling.\n\n Returns:\n point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n that contains the coordinates sampled points.\n \"\"\"\n assert oversample_ratio >= 1\n assert 0 <= importance_sample_ratio <= 1\n batch_size = mask_preds.shape[0]\n num_sampled = int(num_points * oversample_ratio)\n point_coords = torch.rand(\n batch_size, num_sampled, 2, device=mask_preds.device)\n point_logits = point_sample(mask_preds, point_coords)\n # It is crucial to calculate uncertainty based on the sampled\n # prediction value for the points. Calculating uncertainties of the\n # coarse predictions first and sampling them for points leads to\n # incorrect results. To illustrate this: assume uncertainty func(\n # logits)=-abs(logits), a sampled point between two coarse\n # predictions with -1 and 1 logits has 0 logits, and therefore 0\n # uncertainty value. 
However, if we calculate uncertainties for the\n # coarse predictions first, both will have -1 uncertainty,\n # and sampled point will get -1 uncertainty.\n point_uncertainties = get_uncertainty(point_logits, labels)\n num_uncertain_points = int(importance_sample_ratio * num_points)\n num_random_points = num_points - num_uncertain_points\n idx = torch.topk(\n point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]\n shift = num_sampled * torch.arange(\n batch_size, dtype=torch.long, device=mask_preds.device)\n idx += shift[:, None]\n point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(\n batch_size, num_uncertain_points, 2)\n if num_random_points > 0:\n rand_roi_coords = torch.rand(\n batch_size, num_random_points, 2, device=mask_preds.device)\n point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)\n return point_coords" }, { "identifier": "resize", "path": "mmseg/models/utils/wrappers.py", "snippet": "def resize(input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n warning=True):\n if warning:\n if size is not None and align_corners:\n input_h, input_w = tuple(int(x) for x in input.shape[2:])\n output_h, output_w = tuple(int(x) for x in size)\n if output_h > input_h or output_w > output_h:\n if ((output_h > 1 and output_w > 1 and input_h > 1\n and input_w > 1) and (output_h - 1) % (input_h - 1)\n and (output_w - 1) % (input_w - 1)):\n warnings.warn(\n f'When align_corners={align_corners}, '\n 'the output would more aligned if '\n f'input size {(input_h, input_w)} is `x+1` and '\n f'out size {(output_h, output_w)} is `nx+1`')\n return F.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "MLP", "path": "mmseg/models/utils/san_layers.py", "snippet": "class MLP(nn.Module):\n \"\"\"Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self,\n input_dim,\n hidden_dim,\n output_dim,\n num_layers,\n affine_func=nn.Linear):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(\n affine_func(n, k)\n for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x: torch.Tensor):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "LayerNorm2d", "path": "mmseg/models/utils/san_layers.py", "snippet": "class LayerNorm2d(nn.Module):\n \"\"\"A LayerNorm variant, popularized by Transformers, that performs point-\n wise mean and variance normalization over the channel dimension for inputs\n that have shape (batch_size, channels, height, width).\n\n https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa B950\n \"\"\"\n\n def __init__(self, normalized_shape, eps=1e-6):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(normalized_shape))\n self.bias = nn.Parameter(torch.zeros(normalized_shape))\n self.eps = eps\n self.normalized_shape = (normalized_shape, )\n\n def forward(self, x: torch.Tensor):\n u = x.mean(1, keepdim=True)\n s = (x - u).pow(2).mean(1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n x = self.weight[:, None, None] * x + self.bias[:, None, None]\n return x" }, { "identifier": "cross_attn_layer", "path": "mmseg/models/utils/san_layers.py", "snippet": "def cross_attn_layer(tf_layer: BaseTransformerLayer, x, mem, attn_bias):\n \"\"\"Implementation of transformer layer with cross attention. 
The cross\n attention shares the embedding weights with self-attention of tf_layer.\n Args:\n tf_layer: (TransformerEncoderLayer): The Module of transformer layer.\n x (Tensor): query [K,N,C]\n mem (Tensor): key and value [L,N,C]\n attn_bias (Tensor): attention bias [N*num_head,K,L]\n\n Return:\n x (Tensor): cross attention output [K,N,C]\n \"\"\"\n self_attn_layer = tf_layer.attentions[0].attn\n attn_layer_paras = {\n 'embed_dim_to_check': self_attn_layer.embed_dim,\n 'num_heads': self_attn_layer.num_heads,\n 'in_proj_weight': self_attn_layer.in_proj_weight,\n 'in_proj_bias': self_attn_layer.in_proj_bias,\n 'bias_k': self_attn_layer.bias_k,\n 'bias_v': self_attn_layer.bias_v,\n 'add_zero_attn': self_attn_layer.add_zero_attn,\n 'dropout_p': self_attn_layer.dropout,\n 'out_proj_weight': self_attn_layer.out_proj.weight,\n 'out_proj_bias': self_attn_layer.out_proj.bias,\n 'training': self_attn_layer.training\n }\n\n q_x = tf_layer.norms[0](x)\n k_x = v_x = tf_layer.norms[0](mem)\n x = x + cross_attn_with_self_bias(\n q_x,\n k_x,\n v_x,\n attn_mask=attn_bias,\n need_weights=False,\n **attn_layer_paras)[0]\n x = tf_layer.ffns[0](tf_layer.norms[1](x), identity=x)\n return x" }, { "identifier": "BaseDecodeHead", "path": "mmseg/models/decode_heads/decode_head.py", "snippet": "class BaseDecodeHead(BaseModule, metaclass=ABCMeta):\n \"\"\"Base class for BaseDecodeHead.\n\n 1. The ``init_weights`` method is used to initialize decode_head's\n model parameters. After segmentor initialization, ``init_weights``\n is triggered when ``segmentor.init_weights()`` is called externally.\n\n 2. The ``loss`` method is used to calculate the loss of decode_head,\n which includes two steps: (1) the decode_head model performs forward\n propagation to obtain the feature maps (2) The ``loss_by_feat`` method\n is called based on the feature maps to calculate the loss.\n\n .. code:: text\n\n loss(): forward() -> loss_by_feat()\n\n 3. The ``predict`` method is used to predict segmentation results,\n which includes two steps: (1) the decode_head model performs forward\n propagation to obtain the feature maps (2) The ``predict_by_feat`` method\n is called based on the feature maps to predict segmentation results\n including post-processing.\n\n .. code:: text\n\n predict(): forward() -> predict_by_feat()\n\n Args:\n in_channels (int|Sequence[int]): Input channels.\n channels (int): Channels after modules, before conv_seg.\n num_classes (int): Number of classes.\n out_channels (int): Output channels of conv_seg. Default: None.\n threshold (float): Threshold for binary segmentation in the case of\n `num_classes==1`. Default: None.\n dropout_ratio (float): Ratio of dropout layer. Default: 0.1.\n conv_cfg (dict|None): Config of conv layers. Default: None.\n norm_cfg (dict|None): Config of norm layers. Default: None.\n act_cfg (dict): Config of activation layers.\n Default: dict(type='ReLU')\n in_index (int|Sequence[int]): Input feature index. 
Default: -1\n input_transform (str|None): Transformation type of input features.\n Options: 'resize_concat', 'multiple_select', None.\n 'resize_concat': Multiple feature maps will be resize to the\n same size as first one and than concat together.\n Usually used in FCN head of HRNet.\n 'multiple_select': Multiple feature maps will be bundle into\n a list and passed into decode head.\n None: Only one select feature map is allowed.\n Default: None.\n loss_decode (dict | Sequence[dict]): Config of decode loss.\n The `loss_name` is property of corresponding loss function which\n could be shown in training log. If you want this loss\n item to be included into the backward graph, `loss_` must be the\n prefix of the name. Defaults to 'loss_ce'.\n e.g. dict(type='CrossEntropyLoss'),\n [dict(type='CrossEntropyLoss', loss_name='loss_ce'),\n dict(type='DiceLoss', loss_name='loss_dice')]\n Default: dict(type='CrossEntropyLoss').\n ignore_index (int | None): The label index to be ignored. When using\n masked BCE loss, ignore_index should be set to None. Default: 255.\n sampler (dict|None): The config of segmentation map sampler.\n Default: None.\n align_corners (bool): align_corners argument of F.interpolate.\n Default: False.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n \"\"\"\n\n def __init__(self,\n in_channels,\n channels,\n *,\n num_classes,\n out_channels=None,\n threshold=None,\n dropout_ratio=0.1,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n in_index=-1,\n input_transform=None,\n loss_decode=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n ignore_index=255,\n sampler=None,\n align_corners=False,\n init_cfg=dict(\n type='Normal', std=0.01, override=dict(name='conv_seg'))):\n super().__init__(init_cfg)\n self._init_inputs(in_channels, in_index, input_transform)\n self.channels = channels\n self.dropout_ratio = dropout_ratio\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.in_index = in_index\n\n self.ignore_index = ignore_index\n self.align_corners = align_corners\n\n if out_channels is None:\n if num_classes == 2:\n warnings.warn('For binary segmentation, we suggest using'\n '`out_channels = 1` to define the output'\n 'channels of segmentor, and use `threshold`'\n 'to convert `seg_logits` into a prediction'\n 'applying a threshold')\n out_channels = num_classes\n\n if out_channels != num_classes and out_channels != 1:\n raise ValueError(\n 'out_channels should be equal to num_classes,'\n 'except binary segmentation set out_channels == 1 and'\n f'num_classes == 2, but got out_channels={out_channels}'\n f'and num_classes={num_classes}')\n\n if out_channels == 1 and threshold is None:\n threshold = 0.3\n warnings.warn('threshold is not defined for binary, and defaults'\n 'to 0.3')\n self.num_classes = num_classes\n self.out_channels = out_channels\n self.threshold = threshold\n\n if isinstance(loss_decode, dict):\n self.loss_decode = build_loss(loss_decode)\n elif isinstance(loss_decode, (list, tuple)):\n self.loss_decode = nn.ModuleList()\n for loss in loss_decode:\n self.loss_decode.append(build_loss(loss))\n else:\n raise TypeError(f'loss_decode must be a dict or sequence of dict,\\\n but got {type(loss_decode)}')\n\n if sampler is not None:\n self.sampler = build_pixel_sampler(sampler, context=self)\n else:\n self.sampler = None\n\n self.conv_seg = nn.Conv2d(channels, self.out_channels, kernel_size=1)\n if dropout_ratio > 0:\n self.dropout = nn.Dropout2d(dropout_ratio)\n else:\n 
self.dropout = None\n\n def extra_repr(self):\n \"\"\"Extra repr.\"\"\"\n s = f'input_transform={self.input_transform}, ' \\\n f'ignore_index={self.ignore_index}, ' \\\n f'align_corners={self.align_corners}'\n return s\n\n def _init_inputs(self, in_channels, in_index, input_transform):\n \"\"\"Check and initialize input transforms.\n\n The in_channels, in_index and input_transform must match.\n Specifically, when input_transform is None, only single feature map\n will be selected. So in_channels and in_index must be of type int.\n When input_transform\n\n Args:\n in_channels (int|Sequence[int]): Input channels.\n in_index (int|Sequence[int]): Input feature index.\n input_transform (str|None): Transformation type of input features.\n Options: 'resize_concat', 'multiple_select', None.\n 'resize_concat': Multiple feature maps will be resize to the\n same size as first one and than concat together.\n Usually used in FCN head of HRNet.\n 'multiple_select': Multiple feature maps will be bundle into\n a list and passed into decode head.\n None: Only one select feature map is allowed.\n \"\"\"\n\n if input_transform is not None:\n assert input_transform in ['resize_concat', 'multiple_select']\n self.input_transform = input_transform\n self.in_index = in_index\n if input_transform is not None:\n assert isinstance(in_channels, (list, tuple))\n assert isinstance(in_index, (list, tuple))\n assert len(in_channels) == len(in_index)\n if input_transform == 'resize_concat':\n self.in_channels = sum(in_channels)\n else:\n self.in_channels = in_channels\n else:\n assert isinstance(in_channels, int)\n assert isinstance(in_index, int)\n self.in_channels = in_channels\n\n def _transform_inputs(self, inputs):\n \"\"\"Transform inputs for decoder.\n\n Args:\n inputs (list[Tensor]): List of multi-level img features.\n\n Returns:\n Tensor: The transformed inputs\n \"\"\"\n\n if self.input_transform == 'resize_concat':\n inputs = [inputs[i] for i in self.in_index]\n upsampled_inputs = [\n resize(\n input=x,\n size=inputs[0].shape[2:],\n mode='bilinear',\n align_corners=self.align_corners) for x in inputs\n ]\n inputs = torch.cat(upsampled_inputs, dim=1)\n elif self.input_transform == 'multiple_select':\n inputs = [inputs[i] for i in self.in_index]\n else:\n inputs = inputs[self.in_index]\n\n return inputs\n\n @abstractmethod\n def forward(self, inputs):\n \"\"\"Placeholder of forward function.\"\"\"\n pass\n\n def cls_seg(self, feat):\n \"\"\"Classify each pixel.\"\"\"\n if self.dropout is not None:\n feat = self.dropout(feat)\n output = self.conv_seg(feat)\n return output\n\n def loss(self, inputs: Tuple[Tensor], batch_data_samples: SampleList,\n train_cfg: ConfigType) -> dict:\n \"\"\"Forward function for training.\n\n Args:\n inputs (Tuple[Tensor]): List of multi-level img features.\n batch_data_samples (list[:obj:`SegDataSample`]): The seg\n data samples. 
It usually includes information such\n as `img_metas` or `gt_semantic_seg`.\n train_cfg (dict): The training config.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n seg_logits = self.forward(inputs)\n losses = self.loss_by_feat(seg_logits, batch_data_samples)\n return losses\n\n def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict],\n test_cfg: ConfigType) -> Tensor:\n \"\"\"Forward function for prediction.\n\n Args:\n inputs (Tuple[Tensor]): List of multi-level img features.\n batch_img_metas (dict): List Image info where each dict may also\n contain: 'img_shape', 'scale_factor', 'flip', 'img_path',\n 'ori_shape', and 'pad_shape'.\n For details on the values of these keys see\n `mmseg/datasets/pipelines/formatting.py:PackSegInputs`.\n test_cfg (dict): The testing config.\n\n Returns:\n Tensor: Outputs segmentation logits map.\n \"\"\"\n seg_logits = self.forward(inputs)\n\n return self.predict_by_feat(seg_logits, batch_img_metas)\n\n def _stack_batch_gt(self, batch_data_samples: SampleList) -> Tensor:\n gt_semantic_segs = [\n data_sample.gt_sem_seg.data for data_sample in batch_data_samples\n ]\n return torch.stack(gt_semantic_segs, dim=0)\n\n def loss_by_feat(self, seg_logits: Tensor,\n batch_data_samples: SampleList) -> dict:\n \"\"\"Compute segmentation loss.\n\n Args:\n seg_logits (Tensor): The output from decode head forward function.\n batch_data_samples (List[:obj:`SegDataSample`]): The seg\n data samples. It usually includes information such\n as `metainfo` and `gt_sem_seg`.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n\n seg_label = self._stack_batch_gt(batch_data_samples)\n loss = dict()\n seg_logits = resize(\n input=seg_logits,\n size=seg_label.shape[2:],\n mode='bilinear',\n align_corners=self.align_corners)\n if self.sampler is not None:\n seg_weight = self.sampler.sample(seg_logits, seg_label)\n else:\n seg_weight = None\n seg_label = seg_label.squeeze(1)\n\n if not isinstance(self.loss_decode, nn.ModuleList):\n losses_decode = [self.loss_decode]\n else:\n losses_decode = self.loss_decode\n for loss_decode in losses_decode:\n if loss_decode.loss_name not in loss:\n loss[loss_decode.loss_name] = loss_decode(\n seg_logits,\n seg_label,\n weight=seg_weight,\n ignore_index=self.ignore_index)\n else:\n loss[loss_decode.loss_name] += loss_decode(\n seg_logits,\n seg_label,\n weight=seg_weight,\n ignore_index=self.ignore_index)\n\n loss['acc_seg'] = accuracy(\n seg_logits, seg_label, ignore_index=self.ignore_index)\n return loss\n\n def predict_by_feat(self, seg_logits: Tensor,\n batch_img_metas: List[dict]) -> Tensor:\n \"\"\"Transform a batch of output seg_logits to the input shape.\n\n Args:\n seg_logits (Tensor): The output from decode head forward function.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n\n Returns:\n Tensor: Outputs segmentation logits map.\n \"\"\"\n\n if isinstance(batch_img_metas[0]['img_shape'], torch.Size):\n # slide inference\n size = batch_img_metas[0]['img_shape']\n elif 'pad_shape' in batch_img_metas[0]:\n size = batch_img_metas[0]['pad_shape'][:2]\n else:\n size = batch_img_metas[0]['img_shape']\n\n seg_logits = resize(\n input=seg_logits,\n size=size,\n mode='bilinear',\n align_corners=self.align_corners)\n return seg_logits" } ]
from functools import partial
from typing import Dict, List, Tuple
from mmcv.cnn import ConvModule, build_norm_layer
from mmcv.cnn.bricks.transformer import BaseTransformerLayer
from mmcv.ops import point_sample
from mmengine.dist import all_reduce
from mmengine.model.weight_init import (caffe2_xavier_init, normal_init,
                                        trunc_normal_)
from mmengine.runner.checkpoint import CheckpointLoader, load_state_dict
from mmengine.structures import InstanceData
from torch import Tensor
from torch.nn import functional as F
from mmseg.models.backbones.vit import TransformerEncoderLayer
from mmseg.registry import MODELS
from mmseg.utils import (ConfigType, MatchMasks, SampleList,
                         seg_data_to_instance_data)
from ..utils import (MLP, LayerNorm2d, PatchEmbed, cross_attn_layer,
                     get_uncertain_point_coords_with_randomness, resize)
from .decode_head import BaseDecodeHead
import torch
import torch.nn as nn
12,157
""" def __init__(self, num_classes: int, san_cfg: ConfigType, maskgen_cfg: ConfigType, deep_supervision_idxs: List[int], train_cfg: ConfigType, **kwargs): super().__init__( in_channels=san_cfg.in_channels, channels=san_cfg.embed_dims, num_classes=num_classes, **kwargs) assert san_cfg.num_queries == maskgen_cfg.sos_token_num, \ 'num_queries in san_cfg should be equal to sos_token_num ' \ 'in maskgen_cfg' del self.conv_seg self.side_adapter_network = SideAdapterNetwork(**san_cfg) self.rec_with_attnbias = RecWithAttnbias(**maskgen_cfg) self.deep_supervision_idxs = deep_supervision_idxs self.train_cfg = train_cfg if train_cfg: self.match_masks = MatchMasks( num_points=train_cfg.num_points, num_queries=san_cfg.num_queries, num_classes=num_classes, assigner=train_cfg.assigner) def init_weights(self): rec_state_dict = None if isinstance(self.init_cfg, dict) and \ self.init_cfg.get('type') == 'Pretrained_Part': checkpoint = CheckpointLoader.load_checkpoint( self.init_cfg['checkpoint'], logger=None, map_location='cpu') rec_state_dict = checkpoint.copy() para_prefix = 'decode_head.rec_with_attnbias' prefix_len = len(para_prefix) + 1 for k, v in checkpoint.items(): rec_state_dict.pop(k) if para_prefix in k: rec_state_dict[k[prefix_len:]] = v self.side_adapter_network.init_weights() self.rec_with_attnbias.init_weights(rec_state_dict) def forward(self, inputs: Tuple[Tensor], deep_supervision_idxs) -> Tuple[List]: """Forward function. Args: inputs (Tuple[Tensor]): A triplet including images, list of multi-level visual features from image encoder and class embeddings from text_encoder. Returns: mask_props (List[Tensor]): Mask proposals predicted by SAN. mask_logits (List[Tensor]): Class logits of mask proposals. """ imgs, clip_feature, class_embeds = inputs # predict mask proposals and attention bias mask_props, attn_biases = self.side_adapter_network( imgs, clip_feature, deep_supervision_idxs) # mask recognition with attention bias mask_embeds = [ self.rec_with_attnbias(att_bias, clip_feature[-1]) for att_bias in attn_biases ] # Obtain class prediction of masks by comparing the similarity # between the image token and the text embedding of class names. mask_logits = [ torch.einsum('bqc,nc->bqn', mask_embed, class_embeds) for mask_embed in mask_embeds ] return mask_props, mask_logits def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict], test_cfg: ConfigType) -> Tensor: """Forward function for prediction. Args: inputs (Tuple[Tensor]): Images, visual features from image encoder and class embedding from text encoder. batch_img_metas (dict): List Image info where each dict may also contain: 'img_shape', 'scale_factor', 'flip', 'img_path', 'ori_shape', and 'pad_shape'. For details on the values of these keys see `mmseg/datasets/pipelines/formatting.py:PackSegInputs`. test_cfg (dict): The testing config. Returns: Tensor: Outputs segmentation logits map. """ mask_props, mask_logits = self.forward(inputs, []) return self.predict_by_feat([mask_props[-1], mask_logits[-1]], batch_img_metas) def predict_by_feat(self, seg_logits: List[Tensor], batch_img_metas: List[dict]) -> Tensor: """1. Transform a batch of mask proposals to the input shape. 2. Generate segmentation map with mask proposals and class logits. 
""" mask_pred = seg_logits[0] cls_score = seg_logits[1] if isinstance(batch_img_metas[0]['img_shape'], torch.Size): # slide inference size = batch_img_metas[0]['img_shape'] elif 'pad_shape' in batch_img_metas[0]: size = batch_img_metas[0]['pad_shape'][:2] else: size = batch_img_metas[0]['img_shape'] # upsample mask mask_pred = F.interpolate( mask_pred, size=size, mode='bilinear', align_corners=False) mask_cls = F.softmax(cls_score, dim=-1)[..., :-1] mask_pred = mask_pred.sigmoid() seg_logits = torch.einsum('bqc,bqhw->bchw', mask_cls, mask_pred) return seg_logits
# Copyright (c) OpenMMLab. All rights reserved. class MLPMaskDecoder(nn.Module): """Module for decoding query and visual features with MLP layers to generate the attention biases and the mask proposals.""" def __init__( self, *, in_channels: int, total_heads: int = 1, total_layers: int = 1, embed_channels: int = 256, mlp_channels: int = 256, mlp_num_layers: int = 3, rescale_attn_bias: bool = False, ): super().__init__() self.total_heads = total_heads self.total_layers = total_layers dense_affine_func = partial(nn.Conv2d, kernel_size=1) # Query Branch self.query_mlp = MLP(in_channels, mlp_channels, embed_channels, mlp_num_layers) # Pixel Branch self.pix_mlp = MLP( in_channels, mlp_channels, embed_channels, mlp_num_layers, affine_func=dense_affine_func, ) # Attention Bias Branch self.attn_mlp = MLP( in_channels, mlp_channels, embed_channels * self.total_heads * self.total_layers, mlp_num_layers, affine_func=dense_affine_func, ) if rescale_attn_bias: self.bias_scaling = nn.Linear(1, 1) else: self.bias_scaling = nn.Identity() def forward(self, query: torch.Tensor, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: """Forward function. Args: query (Tensor): Query Tokens [B,N,C]. x (Tensor): Visual features [B,C,H,W] Return: mask_preds (Tensor): Mask proposals. attn_bias (List[Tensor]): List of attention bias. """ query = self.query_mlp(query) pix = self.pix_mlp(x) b, c, h, w = pix.shape # preidict mask mask_preds = torch.einsum('bqc,bchw->bqhw', query, pix) # generate attn bias attn = self.attn_mlp(x) attn = attn.reshape(b, self.total_layers, self.total_heads, c, h, w) attn_bias = torch.einsum('bqc,blnchw->blnqhw', query, attn) attn_bias = self.bias_scaling(attn_bias[..., None]).squeeze(-1) attn_bias = attn_bias.chunk(self.total_layers, dim=1) attn_bias = [attn.squeeze(1) for attn in attn_bias] return mask_preds, attn_bias class SideAdapterNetwork(nn.Module): """Side Adapter Network for predicting mask proposals and attention bias. Args: in_channels (int): Number of input channels. Default: 3. clip_channels (int): Number of channels of visual features. Default: 768. embed_dims (int): embedding dimension. Default: 240. patch_size (int): The patch size. Default: 16. patch_bias (bool): Whether use bias in patch embedding. Default: True. num_queries (int): Number of queries for mask proposals. Default: 100. fusion_index (List[int]): The layer number of the encode transformer to fuse with the CLIP feature. Default: [0, 1, 2, 3]. cfg_encoder (ConfigType): Configs for the encode layers. cfg_decoder (ConfigType): Configs for the decode layers. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='LN'). 
""" def __init__( self, in_channels: int = 3, clip_channels: int = 768, embed_dims: int = 240, patch_size: int = 16, patch_bias: bool = True, num_queries: int = 100, fusion_index: list = [0, 1, 2, 3], cfg_encoder: ConfigType = ..., cfg_decoder: ConfigType = ..., norm_cfg: dict = dict(type='LN'), ): super().__init__() self.patch_embed = PatchEmbed( in_channels=in_channels, embed_dims=embed_dims, conv_type='Conv2d', kernel_size=patch_size, stride=patch_size, padding=0, input_size=(640, 640), bias=patch_bias, norm_cfg=None, init_cfg=None, ) ori_h, ori_w = self.patch_embed.init_out_size num_patches = ori_h * ori_w self.pos_embed = nn.Parameter( torch.randn(1, num_patches, embed_dims) * .02) self.query_pos_embed = nn.Parameter( torch.zeros(1, num_queries, embed_dims)) self.query_embed = nn.Parameter( torch.zeros(1, num_queries, embed_dims)) encode_layers = [] for i in range(cfg_encoder.num_encode_layer): encode_layers.append( TransformerEncoderLayer( embed_dims=embed_dims, num_heads=cfg_encoder.num_heads, feedforward_channels=cfg_encoder.mlp_ratio * embed_dims, norm_cfg=norm_cfg)) self.encode_layers = nn.ModuleList(encode_layers) conv_clips = [] for i in range(len(fusion_index)): conv_clips.append( nn.Sequential( LayerNorm2d(clip_channels), ConvModule( clip_channels, embed_dims, kernel_size=1, norm_cfg=None, act_cfg=None))) self.conv_clips = nn.ModuleList(conv_clips) self.fusion_index = fusion_index self.mask_decoder = MLPMaskDecoder( in_channels=embed_dims, total_heads=cfg_decoder.num_heads, total_layers=cfg_decoder.num_layers, embed_channels=cfg_decoder.embed_channels, mlp_channels=cfg_decoder.mlp_channels, mlp_num_layers=cfg_decoder.num_mlp, rescale_attn_bias=cfg_decoder.rescale) def init_weights(self): trunc_normal_(self.pos_embed, std=0.02) nn.init.normal_(self.query_embed, std=0.02) nn.init.normal_(self.query_pos_embed, std=0.02) for i in range(len(self.conv_clips)): caffe2_xavier_init(self.conv_clips[i][1].conv) def fuse_clip(self, fused_index: int, x: torch.Tensor, clip_feature: torch.Tensor, hwshape: Tuple[int, int], L: int): """Fuse CLIP feature and visual tokens.""" fused_clip = (resize( self.conv_clips[fused_index](clip_feature.contiguous()), size=hwshape, mode='bilinear', align_corners=False)).permute(0, 2, 3, 1).reshape(x[:, -L:, ...].shape) x = torch.cat([x[:, :-L, ...], x[:, -L:, ...] 
+ fused_clip], dim=1) return x def encode_feature(self, image: torch.Tensor, clip_features: List[torch.Tensor], deep_supervision_idxs: List[int]) -> List[List]: """Encode images by a lightweight vision transformer.""" assert len(self.fusion_index) == len(clip_features) x, hwshape = self.patch_embed(image) ori_h, ori_w = self.patch_embed.init_out_size pos_embed = self.pos_embed if self.pos_embed.shape[1] != x.shape[1]: # resize the position embedding pos_embed = ( resize( self.pos_embed.reshape(1, ori_h, ori_w, -1).permute(0, 3, 1, 2), size=hwshape, mode='bicubic', align_corners=False, ).flatten(2).permute(0, 2, 1)) pos_embed = torch.cat([ self.query_pos_embed.expand(pos_embed.shape[0], -1, -1), pos_embed ], dim=1) x = torch.cat([self.query_embed.expand(x.shape[0], -1, -1), x], dim=1) x = x + pos_embed L = hwshape[0] * hwshape[1] fused_index = 0 if self.fusion_index[fused_index] == 0: x = self.fuse_clip(fused_index, x, clip_features[0][0], hwshape, L) fused_index += 1 outs = [] for index, block in enumerate(self.encode_layers, start=1): x = block(x) if index < len(self.fusion_index ) and index == self.fusion_index[fused_index]: x = self.fuse_clip(fused_index, x, clip_features[fused_index][0], hwshape, L) fused_index += 1 x_query = x[:, :-L, ...] x_feat = x[:, -L:, ...].permute(0, 2, 1)\ .reshape(x.shape[0], x.shape[-1], hwshape[0], hwshape[1]) if index in deep_supervision_idxs or index == len( self.encode_layers): outs.append({'query': x_query, 'x': x_feat}) if index < len(self.encode_layers): x = x + pos_embed return outs def decode_feature(self, features): mask_embeds = [] attn_biases = [] for feature in features: mask_embed, attn_bias = self.mask_decoder(**feature) mask_embeds.append(mask_embed) attn_biases.append(attn_bias) return mask_embeds, attn_biases def forward( self, image: torch.Tensor, clip_features: List[torch.Tensor], deep_supervision_idxs: List[int] ) -> Tuple[List[torch.Tensor], List[List[torch.Tensor]]]: """Forward function.""" features = self.encode_feature(image, clip_features, deep_supervision_idxs) mask_embeds, attn_biases = self.decode_feature(features) return mask_embeds, attn_biases class RecWithAttnbias(nn.Module): """Mask recognition module by applying the attention biases to rest deeper CLIP layers. Args: sos_token_format (str): The format of sos token. It should be chosen from ["cls_token", "learnable_token", "pos_embedding"]. Default: 'cls_token'. sos_token_num (int): Number of sos token. It should be equal to the number of quries. Default: 100. num_layers (int): Number of rest CLIP layers for mask recognition. Default: 3. cross_attn (bool): Whether use cross attention to update sos token. Default: False. embed_dims (int): The feature dimension of CLIP layers. Default: 768. num_heads (int): Parallel attention heads of CLIP layers. Default: 768. mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. Default: 4. qkv_bias (bool): Whether to use bias in multihead-attention. Default: True. out_dims (int): Number of channels of the output mask proposals. It should be equal to the out_dims of text_encoder. Default: 512. final_norm (True): Whether use norm layer for sos token. act_cfg (dict): The activation config for FFNs. Default: dict(type='GELU'). norm_cfg (dict): Config dict for normalization layer. Default: dict(type='LN'). frozen_exclude (List): List of parameters that are not to be frozen. 
""" def __init__(self, sos_token_format: str = 'cls_token', sos_token_num: int = 100, num_layers: int = 3, cross_attn: bool = False, embed_dims: int = 768, num_heads: int = 12, mlp_ratio: int = 4, num_fcs: int = 2, qkv_bias: bool = True, out_dims: int = 512, final_norm: bool = True, act_cfg: dict = dict(type='GELU'), norm_cfg: dict = dict(type='LN'), frozen_exclude: List = []): super().__init__() assert sos_token_format in [ 'cls_token', 'learnable_token', 'pos_embedding' ] self.sos_token_format = sos_token_format self.sos_token_num = sos_token_num self.frozen_exclude = frozen_exclude self.cross_attn = cross_attn self.num_layers = num_layers self.num_heads = num_heads if sos_token_format in ['learnable_token', 'pos_embedding']: self.sos_token = nn.Parameter( torch.randn(sos_token_num, 1, self.proj.shape[0])) self.frozen.append('sos_token') layers = [] for i in range(num_layers): layers.append( BaseTransformerLayer( attn_cfgs=dict( type='MultiheadAttention', embed_dims=embed_dims, num_heads=num_heads, batch_first=False, bias=qkv_bias), ffn_cfgs=dict( type='FFN', embed_dims=embed_dims, feedforward_channels=mlp_ratio * embed_dims, act_cfg=act_cfg), operation_order=('norm', 'self_attn', 'norm', 'ffn'))) self.layers = nn.ModuleList(layers) self.ln_post = build_norm_layer(norm_cfg, embed_dims)[1] self.proj = nn.Linear(embed_dims, out_dims, bias=False) self.final_norm = final_norm self._freeze() def init_weights(self, rec_state_dict): if hasattr(self, 'sos_token'): normal_init(self.sos_token, std=0.02) if rec_state_dict is not None: load_state_dict(self, rec_state_dict, strict=False, logger=None) else: super().init_weights() def _freeze(self): if 'all' in self.frozen_exclude: return for name, param in self.named_parameters(): if not any([exclude in name for exclude in self.frozen_exclude]): param.requires_grad = False def _build_attn_biases(self, attn_biases, target_shape): formatted_attn_biases = [] for attn_bias in attn_biases: # convert it to proper format: N*num_head,L,L # attn_bias: [N, num_head/1, num_sos,H,W] n, num_head, num_sos, h, w = attn_bias.shape # reshape and downsample attn_bias = F.adaptive_max_pool2d( attn_bias.reshape(n, num_head * num_sos, h, w), output_size=target_shape) attn_bias = attn_bias.reshape(n, num_head, num_sos, *target_shape) true_num_head = self.num_heads assert (num_head == 1 or num_head == true_num_head), f'num_head={num_head} is not supported.' if num_head == 1: attn_bias = attn_bias.repeat(1, true_num_head, 1, 1, 1) attn_bias = attn_bias.reshape(n * true_num_head, num_sos, -1) L = attn_bias.shape[-1] if self.cross_attn: # [n*num_head, num_sos, L] formatted_attn_biases.append(attn_bias) else: # [n*num_head, num_sos+1+L, num_sos+1+L] new_attn_bias = attn_bias.new_zeros(num_sos + 1 + L, num_sos + 1 + L) new_attn_bias[:, :num_sos] = -100 new_attn_bias[torch.arange(num_sos), torch.arange(num_sos)] = 0 new_attn_bias[:num_sos, num_sos] = -100 new_attn_bias = ( new_attn_bias[None, ...].expand(n * true_num_head, -1, -1).clone()) new_attn_bias[..., :num_sos, -L:] = attn_bias formatted_attn_biases.append(new_attn_bias) if len(formatted_attn_biases) == 1: formatted_attn_biases = [ formatted_attn_biases[0] for _ in range(self.num_layers) ] return formatted_attn_biases def forward(self, bias: List[Tensor], feature: List[Tensor]): """Forward function to recognize the category of masks Args: bias (List[Tensor]): Attention bias for transformer layers feature (List[Tensor]): Output of the image encoder, including cls_token and img_feature. 
""" cls_token = feature[1].unsqueeze(0) img_feature = feature[0] b, c, h, w = img_feature.shape # construct clip shadow features x = torch.cat( [cls_token, img_feature.reshape(b, c, -1).permute(2, 0, 1)]) # construct sos token if self.sos_token_format == 'cls_token': sos_token = cls_token.repeat(self.sos_token_num, 1, 1) elif self.sos_token_format == 'learnable_token': sos_token = self.sos_token.expand(-1, b, -1) elif self.sos_token_format == 'pos_embedding': sos_token = self.sos_token.expand(-1, b, -1) + cls_token # construct attn bias attn_biases = self._build_attn_biases(bias, target_shape=(h, w)) if self.cross_attn: for i, block in enumerate(self.layers): if self.cross_attn: sos_token = cross_attn_layer( block, sos_token, x[1:, ], attn_biases[i], ) if i < len(self.layers) - 1: x = block(x) else: x = torch.cat([sos_token, x], dim=0) for i, block in enumerate(self.layers): x = block(x, attn_masks=[attn_biases[i]]) sos_token = x[:self.sos_token_num] sos_token = sos_token.permute(1, 0, 2) # LND -> NLD sos_token = self.ln_post(sos_token) sos_token = self.proj(sos_token) if self.final_norm: sos_token = F.normalize(sos_token, dim=-1) return sos_token @MODELS.register_module() class SideAdapterCLIPHead(BaseDecodeHead): """Side Adapter Network (SAN) for open-vocabulary semantic segmentation with pre-trained vision-language model. This decode head is the implementation of `Side Adapter Network for Open-Vocabulary Semantic Segmentation` <https://arxiv.org/abs/2302.12242>. Modified from https://github.com/MendelXu/SAN/blob/main/san/model/side_adapter/side_adapter.py # noqa:E501 Copyright (c) 2023 MendelXu. Licensed under the MIT License Args: num_classes (int): the number of classes. san_cfg (ConfigType): Configs for SideAdapterNetwork module maskgen_cfg (ConfigType): Configs for RecWithAttnbias module """ def __init__(self, num_classes: int, san_cfg: ConfigType, maskgen_cfg: ConfigType, deep_supervision_idxs: List[int], train_cfg: ConfigType, **kwargs): super().__init__( in_channels=san_cfg.in_channels, channels=san_cfg.embed_dims, num_classes=num_classes, **kwargs) assert san_cfg.num_queries == maskgen_cfg.sos_token_num, \ 'num_queries in san_cfg should be equal to sos_token_num ' \ 'in maskgen_cfg' del self.conv_seg self.side_adapter_network = SideAdapterNetwork(**san_cfg) self.rec_with_attnbias = RecWithAttnbias(**maskgen_cfg) self.deep_supervision_idxs = deep_supervision_idxs self.train_cfg = train_cfg if train_cfg: self.match_masks = MatchMasks( num_points=train_cfg.num_points, num_queries=san_cfg.num_queries, num_classes=num_classes, assigner=train_cfg.assigner) def init_weights(self): rec_state_dict = None if isinstance(self.init_cfg, dict) and \ self.init_cfg.get('type') == 'Pretrained_Part': checkpoint = CheckpointLoader.load_checkpoint( self.init_cfg['checkpoint'], logger=None, map_location='cpu') rec_state_dict = checkpoint.copy() para_prefix = 'decode_head.rec_with_attnbias' prefix_len = len(para_prefix) + 1 for k, v in checkpoint.items(): rec_state_dict.pop(k) if para_prefix in k: rec_state_dict[k[prefix_len:]] = v self.side_adapter_network.init_weights() self.rec_with_attnbias.init_weights(rec_state_dict) def forward(self, inputs: Tuple[Tensor], deep_supervision_idxs) -> Tuple[List]: """Forward function. Args: inputs (Tuple[Tensor]): A triplet including images, list of multi-level visual features from image encoder and class embeddings from text_encoder. Returns: mask_props (List[Tensor]): Mask proposals predicted by SAN. mask_logits (List[Tensor]): Class logits of mask proposals. 
""" imgs, clip_feature, class_embeds = inputs # predict mask proposals and attention bias mask_props, attn_biases = self.side_adapter_network( imgs, clip_feature, deep_supervision_idxs) # mask recognition with attention bias mask_embeds = [ self.rec_with_attnbias(att_bias, clip_feature[-1]) for att_bias in attn_biases ] # Obtain class prediction of masks by comparing the similarity # between the image token and the text embedding of class names. mask_logits = [ torch.einsum('bqc,nc->bqn', mask_embed, class_embeds) for mask_embed in mask_embeds ] return mask_props, mask_logits def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict], test_cfg: ConfigType) -> Tensor: """Forward function for prediction. Args: inputs (Tuple[Tensor]): Images, visual features from image encoder and class embedding from text encoder. batch_img_metas (dict): List Image info where each dict may also contain: 'img_shape', 'scale_factor', 'flip', 'img_path', 'ori_shape', and 'pad_shape'. For details on the values of these keys see `mmseg/datasets/pipelines/formatting.py:PackSegInputs`. test_cfg (dict): The testing config. Returns: Tensor: Outputs segmentation logits map. """ mask_props, mask_logits = self.forward(inputs, []) return self.predict_by_feat([mask_props[-1], mask_logits[-1]], batch_img_metas) def predict_by_feat(self, seg_logits: List[Tensor], batch_img_metas: List[dict]) -> Tensor: """1. Transform a batch of mask proposals to the input shape. 2. Generate segmentation map with mask proposals and class logits. """ mask_pred = seg_logits[0] cls_score = seg_logits[1] if isinstance(batch_img_metas[0]['img_shape'], torch.Size): # slide inference size = batch_img_metas[0]['img_shape'] elif 'pad_shape' in batch_img_metas[0]: size = batch_img_metas[0]['pad_shape'][:2] else: size = batch_img_metas[0]['img_shape'] # upsample mask mask_pred = F.interpolate( mask_pred, size=size, mode='bilinear', align_corners=False) mask_cls = F.softmax(cls_score, dim=-1)[..., :-1] mask_pred = mask_pred.sigmoid() seg_logits = torch.einsum('bqc,bqhw->bchw', mask_cls, mask_pred) return seg_logits
def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList,
2
2023-12-23 08:36:47+00:00
16k
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n 
audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n # from https://github.com/YYuX-1145/Bert-VITS2-Integration-package\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.randn(1024, len(phone))\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.randn(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.randn(1024, len(phone))\n ja_bert = torch.randn(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, 
sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "AudioVisemesLoader", "path": "data_utils.py", "snippet": "class AudioVisemesLoader(torch.utils.data.Dataset):\n \"\"\"\n loads audio, visemes torch variable pairs from visemes list file .\n file is like: \n ./records/date_time.z.npy|./records/date_time.npy\n \"\"\"\n \n def __init__(self, audio_visemes_list_file, hparams):\n 
self.audio_visemes_list_items = load_filepaths_and_text(audio_visemes_list_file)\n print('audio_visemes_list_items: ', len(self.audio_visemes_list_items))\n random.seed(1234)\n random.shuffle(self.audio_visemes_list_items)\n self.max_visemes_len = 1210\n self.min_visemes_len = 1190\n self._filter()\n\n\n def _filter(self):\n # check if the file exists, and can parse as torch tensor\n audio_visemes_list_items_new = []\n for audio_file, visemes_file in self.audio_visemes_list_items:\n if os.path.exists(audio_file) and os.path.exists(visemes_file):\n # check using torch.load\n try:\n audio = torch.load(audio_file)\n visemes = np.load(visemes_file)\n if visemes.shape[0] < self.min_visemes_len:\n print('drop this data: --------- visemes.shape[0] < self.min_visemes_len: ', visemes.shape[0], visemes_file)\n continue\n audio_visemes_list_items_new.append([audio_file, visemes_file])\n except Exception as e:\n print('error: ', audio_file, visemes_file)\n print(e)\n self.audio_visemes_list_items = audio_visemes_list_items_new\n print('audio_visemes_list_items after filter: ', len(self.audio_visemes_list_items))\n\n def __getitem__(self, index):\n # read these two torch.tensor\n audio_file, visemes_file = self.audio_visemes_list_items[index]\n audio_z = torch.load(audio_file).squeeze(0).detach()\n # [192, seq_len(1722)]\n\n visemes = np.load(visemes_file)\n visemes = torch.from_numpy(visemes)\n #[seq_len(1194), 61]\n visemes = visemes.transpose(0, 1)\n #[61, seq_len(1194)]\n if visemes.shape[1] > self.max_visemes_len:\n # cut the extra part\n # print('__getitem__ 1 cut visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n visemes = visemes[:, :self.max_visemes_len]\n elif visemes.shape[1] < self.max_visemes_len:\n # padding to max_visemes_len with last frame\n # print('__getitem__ 2 padding visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n # last_frame = visemes[-1]\n # visemes = np.concatenate([visemes, np.tile(last_frame, (self.max_visemes_len - visemes.shape[0], 1))], axis=0)\n # visemes = torch.from_numpy(visemes)\n pass\n\n visemes_offset = 0.08 # 将visemes延迟n s\n visemes_offset_frames = int(visemes_offset * const_map.ARKIT_FPS)\n visemes = visemes[:, visemes_offset_frames:]\n\n audio_z_offset = 0.0\n audio_z_offset_frames = int(audio_z_offset * const_map.Z_FPS)\n audio_z = audio_z[:, audio_z_offset_frames:]\n\n # 获取二者的时长,将过长的一方多的部分丢弃\n visemes_duration = visemes.shape[1] / const_map.ARKIT_FPS\n audio_z_duration = audio_z.shape[1] / const_map.Z_FPS\n if visemes_duration > audio_z_duration:\n visemes = visemes[:, :int(audio_z_duration * const_map.ARKIT_FPS)]\n elif visemes_duration < audio_z_duration:\n audio_z = audio_z[:, :int(visemes_duration * const_map.Z_FPS)]\n\n\n # print('__getitem__ 3 audio.shape: ', audio.shape, 'visemes.shape: ', visemes.shape,'file: ', visemes_file)\n return audio_z, visemes\n\n def __len__(self):\n return len(self.audio_visemes_list_items)" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n 
flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * 
s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n logw_sdp = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n l_length_sdp += torch.sum((logw_sdp - logw_) ** 2, [1, 2]) / torch.sum(x_mask)\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_, logw_sdp),\n g,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n def get_post_enc_dec(self):\n return self.enc_q, self.dec" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = 
d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.LSTM = nn.LSTM(\n 2 * filter_channels, filter_channels, batch_first=True, bidirectional=True\n )\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(2 * filter_channels, 1), nn.Sigmoid()\n )\n\n def forward_probability(self, x, dur):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = x.transpose(1, 2)\n x, _ = self.LSTM(x)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, dur)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "WavLMDiscriminator", "path": "models.py", "snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(\n self, slm_hidden=768, slm_layers=13, initial_channel=64, use_spectral_norm=False\n ):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(\n Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0)\n )\n\n self.convs = nn.ModuleList(\n [\n norm_f(\n nn.Conv1d(\n initial_channel, initial_channel * 2, kernel_size=5, padding=2\n )\n ),\n norm_f(\n nn.Conv1d(\n initial_channel * 2,\n initial_channel * 4,\n kernel_size=5,\n padding=2,\n )\n ),\n norm_f(\n nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)\n ),\n ]\n )\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n\n def forward(self, x):\n x = self.pre(x)\n\n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, modules.LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x" }, { "identifier": "VisemesNet", "path": "models.py", "snippet": "class VisemesNet(nn.Module):\n def active(self, x):\n # active_fun: 0: null, 1: tanh, 2: relu, 3: LeakyReLU\n if self.active_fun == 1:\n return torch.tanh(x)\n elif self.active_fun == 2:\n return torch.relu(x)\n elif self.active_fun == 3:\n return self.leakyReLU(x)\n else:\n return x\n\n def __init__(self, hidden_channels, lstm_bidirectional=True, active_fun = 3, enable_conv=True, \n use_transformer = False, 
enable_dropout=True):\n super(VisemesNet, self).__init__()\n self.lstm_bidirectional = lstm_bidirectional\n self.lstm_directions = 2 if lstm_bidirectional else 1\n self.use_transformer = use_transformer\n self.enable_dropout = enable_dropout\n if active_fun == 3:\n self.leakyReLU = nn.LeakyReLU(negative_slope=0.01)\n if use_transformer:\n num_heads=8\n num_layers=3\n dim_feedforward=512\n dropout=0.1\n activation=\"relu\"\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(\n d_model=hidden_channels, \n nhead=num_heads,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=True\n )\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)\n else:\n self.lstm = nn.LSTM(input_size=hidden_channels, hidden_size=128, num_layers=3, batch_first=True, bidirectional=lstm_bidirectional)\n if use_transformer:\n self.fc1 = nn.Linear(hidden_channels, 96)\n else:\n self.fc1 = nn.Linear(128 * self.lstm_directions, 96)\n self.fc2 = nn.Linear(96, 61)\n dropout_rate = 0.5\n if self.enable_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n conv_kernel_pre = 15\n conv_kernel_post = 11\n self.conv1d_pre = nn.Conv1d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=conv_kernel_pre, stride=1, padding=conv_kernel_pre//2)\n self.conv1d_post = nn.Conv1d(in_channels=61, out_channels=61, kernel_size=conv_kernel_post, stride=1, padding=conv_kernel_post//2)\n self.enable_conv = enable_conv\n self.active_fun = active_fun\n\n def forward(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.use_transformer:\n return self.forward_transformer(x, y)\n else:\n return self.forward_lstm(x, y)\n\n def forward_transformer(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n # batch_first: True (batch, seq, feature); False (seq, batch, feature).\n x = x.transpose(1, 2)\n\n expressions = self.transformer_encoder(x)\n \n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n # expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n\n return expressions \n\n def forward_lstm(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n x = x.transpose(1, 2)\n # x [batch_size, seq_len, hidden_channels]\n expressions = None\n expressions, _ = self.lstm(x)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n return expressions\n \n def init_weights(self):\n # 初始化权重\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n nn.init.xavier_uniform_(param.data)\n elif 'weight_hh' in name:\n nn.init.orthogonal_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.BatchNorm1d):\n 
nn.init.constant_(m.weight.data, 1)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight.data)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.TransformerEncoderLayer):\n for name, param in m.named_parameters():\n if 'weight' in name:\n if param.dim() == 1:\n nn.init.normal_(param.data)\n else:\n nn.init.xavier_uniform_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.TransformerEncoder):\n for param in m.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param.data)\n else:\n nn.init.constant_(param.data, 0)" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "WavLMLoss", "path": "losses.py", "snippet": "class WavLMLoss(torch.nn.Module):\n def __init__(self, model, wd, model_sr, slm_sr=16000):\n super(WavLMLoss, self).__init__()\n self.wavlm = AutoModel.from_pretrained(model)\n self.wd = wd\n self.resample = torchaudio.transforms.Resample(model_sr, slm_sr)\n self.wavlm.eval()\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def forward(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16.squeeze(), output_hidden_states=True\n ).hidden_states\n\n floss = 0\n for er, eg in zip(wav_embeddings, y_rec_embeddings):\n floss += torch.mean(torch.abs(er - eg))\n\n return floss.mean()\n\n def generator(self, y_rec):\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_df_hat_g = self.wd(y_rec_embeddings)\n loss_gen = torch.mean((1 - y_df_hat_g) ** 2)\n\n return loss_gen\n\n def discriminator(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n 
input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n y_d_gs = self.wd(y_rec_embeddings)\n\n y_df_hat_r, y_df_hat_g = y_d_rs, y_d_gs\n\n r_loss = torch.mean((1 - y_df_hat_r) ** 2)\n g_loss = torch.mean((y_df_hat_g) ** 2)\n\n loss_disc_f = r_loss + g_loss\n\n return loss_disc_f.mean()\n\n def discriminator_forward(self, wav):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n\n return y_d_rs" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import datetime
import gc
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
    AudioVisemesLoader,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
    WavLMDiscriminator,
    VisemesNet,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss,
    WavLMLoss,
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
13,146
# flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try disabling TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
    True
)  # Not available if torch version is lower than 2.0
global_step = 0
global_visemes_step = 0


def run_only_visemes(hps):
    # Simplest single-machine mode: train only the parameters of the fully
    # connected VisemesFCNet that maps the latent variable z to visemes (facial expressions).
    global global_visemes_step
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(0)
# flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try disabling TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
    True
)  # Not available if torch version is lower than 2.0
global_step = 0
global_visemes_step = 0


def run_only_visemes(hps):
    # Simplest single-machine mode: train only the parameters of the fully
    # connected VisemesFCNet that maps the latent variable z to visemes (facial expressions).
    global global_visemes_step
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(0)
train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data)
4
2023-12-27 03:09:11+00:00
16k
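The record above carries everything needed for one next-line completion example: the retrieved context snippets, the import block, the truncated code prefix, the target line (which instantiates AudioVisemesLoader), and the index 4, which points at the AudioVisemesLoader entry in the context list. The sketch below shows one way such a record could be assembled into a prompt for a code-completion model. It is a minimal illustration, assuming the record is available as a Python dict whose keys match the column layout of this dump (context, import_statement, cropped_code, next_line, gold_snippet_index); both those key names and the prompt layout are assumptions, not a format defined by the dataset itself.

from typing import Any, Dict

def build_prompt(record: Dict[str, Any], gold_only: bool = True) -> str:
    # "context" is a list of {"identifier", "path", "snippet"} entries, as displayed above.
    context = record["context"]
    if gold_only:
        # Assumed semantics: gold_snippet_index selects the context entry whose
        # definition is needed to predict next_line (here, AudioVisemesLoader).
        context = [context[record["gold_snippet_index"]]]
    snippet_block = "\n\n".join(
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in context
    )
    # The prompt layout is an arbitrary choice for illustration:
    # retrieved snippets, then the import block, then the cropped file prefix.
    return f"{snippet_block}\n\n{record['import_statement']}\n\n{record['cropped_code']}"

# A model would be asked to continue this prompt; its first generated line is
# then compared against record["next_line"].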
chinhsuanwu/ifusion-threestudio
threestudio/systems/base.py
[ { "identifier": "Exporter", "path": "threestudio/models/exporters/base.py", "snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError" }, { "identifier": "ExporterOutput", "path": "threestudio/models/exporters/base.py", "snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]" }, { "identifier": "parse_optimizer", "path": "threestudio/systems/utils.py", "snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim" }, { "identifier": "parse_scheduler", "path": "threestudio/systems/utils.py", "snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler" }, { "identifier": "Updateable", "path": "threestudio/utils/base.py", "snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def do_update_step_end(self, epoch: int, global_step: int):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using 
getattr?\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)\n self.update_step_end(epoch, global_step)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass\n\n def update_step_end(self, epoch: int, global_step: int):\n pass" }, { "identifier": "update_end_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)" }, { "identifier": "update_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)" }, { "identifier": "parse_structured", "path": "threestudio/utils/config.py", "snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg" }, { "identifier": "C", "path": "threestudio/utils/misc.py", "snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n if len(value) >= 6:\n select_i = 3\n for i in range(3, len(value) - 2, 2):\n if global_step >= value[i]:\n select_i = i + 2\n if select_i != 3:\n start_value, start_step = value[select_i - 3], value[select_i - 2]\n else:\n start_step, start_value = value[:2]\n end_value, end_step = value[select_i - 1], value[select_i]\n value = [start_step, start_value, end_value, end_step]\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value" }, { "identifier": "cleanup", "path": "threestudio/utils/misc.py", "snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()" }, { "identifier": "find_last_path", "path": "threestudio/utils/misc.py", "snippet": "def find_last_path(path: str):\n if (path is not None) and (\"LAST\" in path):\n path = path.replace(\" \", \"_\")\n base_dir_prefix, suffix = path.split(\"LAST\", 1)\n base_dir = os.path.dirname(base_dir_prefix)\n prefix = os.path.split(base_dir_prefix)[-1]\n base_dir_prefix = os.path.join(base_dir, prefix)\n all_path = os.listdir(base_dir)\n all_path = [os.path.join(base_dir, dir) for dir in all_path]\n filtered_path = [dir for dir in all_path if dir.startswith(base_dir_prefix)]\n filtered_path.sort(reverse=True)\n last_path = filtered_path[0]\n new_path = last_path + suffix\n if os.path.exists(new_path):\n return new_path\n else:\n raise FileNotFoundError(new_path)\n else:\n return path" }, { "identifier": "get_device", "path": 
"threestudio/utils/misc.py", "snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")" }, { "identifier": "load_module_weights", "path": "threestudio/utils/misc.py", "snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]" }, { "identifier": "SaverMixin", "path": "threestudio/utils/saving.py", "snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = 
np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 
255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and 
img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = 
None,\n map_format: str = \"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} 
{Ks[2]}\\n\"\n if map_Bump is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path" } ]
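Editor's note: the _save_obj/_save_mtl helpers in the snippet above build Wavefront OBJ/MTL files by plain string concatenation: "v"/"vt"/"f" records, a flipped V texture coordinate, and 1-based face indices. The following is a minimal, self-contained sketch of that pattern only; it is not part of the SaverMixin API, and write_simple_obj plus the triangle data are illustrative placeholders.

import numpy as np

def write_simple_obj(path, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None):
    # mirror the string-building approach of _save_obj: positions, flipped-V UVs,
    # then 1-based "f v/vt" face records
    lines = []
    for p in v_pos:
        lines.append(f"v {p[0]} {p[1]} {p[2]}")
    if v_tex is not None:
        for t in v_tex:
            lines.append(f"vt {t[0]} {1.0 - t[1]}")  # OBJ convention: flip the V axis
    for fi, face in enumerate(t_pos_idx):
        refs = []
        for j in range(3):
            ref = str(face[j] + 1)                   # OBJ indices start at 1
            if v_tex is not None:
                ref += f"/{t_tex_idx[fi][j] + 1}"
            refs.append(ref)
        lines.append("f " + " ".join(refs))
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")

write_simple_obj(
    "triangle.obj",
    v_pos=np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
    t_pos_idx=np.array([[0, 1, 2]]),
    v_tex=np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]),
    t_tex_idx=np.array([[0, 1, 2]]),
)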
import os import pytorch_lightning as pl import torch.nn.functional as F import threestudio from dataclasses import dataclass, field from threestudio.models.exporters.base import Exporter, ExporterOutput from threestudio.systems.utils import parse_optimizer, parse_scheduler from threestudio.utils.base import ( Updateable, update_end_if_possible, update_if_possible, ) from threestudio.utils.config import parse_structured from threestudio.utils.misc import ( C, cleanup, find_last_path, get_device, load_module_weights, ) from threestudio.utils.saving import SaverMixin from threestudio.utils.typing import * from threestudio.utils.config import load_config, parse_structured
11,182
norms = grad_norm(self.geometry, norm_type=2) print(norms) """ pass class BaseLift3DSystem(BaseSystem): @dataclass class Config(BaseSystem.Config): geometry_type: str = "" geometry: dict = field(default_factory=dict) geometry_convert_from: Optional[str] = None geometry_convert_inherit_texture: bool = False # used to override configurations of the previous geometry being converted from, # for example isosurface_threshold geometry_convert_override: dict = field(default_factory=dict) material_type: str = "" material: dict = field(default_factory=dict) background_type: str = "" background: dict = field(default_factory=dict) renderer_type: str = "" renderer: dict = field(default_factory=dict) guidance_type: str = "" guidance: dict = field(default_factory=dict) prompt_processor_type: str = "" prompt_processor: dict = field(default_factory=dict) # geometry export configurations, no need to specify in training exporter_type: str = "mesh-exporter" exporter: dict = field(default_factory=dict) cfg: Config def configure(self) -> None: self.cfg.geometry_convert_from = find_last_path(self.cfg.geometry_convert_from) self.cfg.weights = find_last_path(self.cfg.weights) if ( self.cfg.geometry_convert_from # from_coarse must be specified and not self.cfg.weights # not initialized from coarse when weights are specified and not self.resumed # not initialized from coarse when resumed from checkpoints ): threestudio.info("Initializing geometry from a given checkpoint ...") prev_cfg = load_config( os.path.join( os.path.dirname(self.cfg.geometry_convert_from), "../configs/parsed.yaml", ) ) # TODO: hard-coded relative path prev_system_cfg: BaseLift3DSystem.Config = parse_structured( self.Config, prev_cfg.system ) prev_geometry_cfg = prev_system_cfg.geometry prev_geometry_cfg.update(self.cfg.geometry_convert_override) prev_geometry = threestudio.find(prev_system_cfg.geometry_type)( prev_geometry_cfg ) state_dict, epoch, global_step = load_module_weights( self.cfg.geometry_convert_from, module_name="geometry", map_location="cpu", ) prev_geometry.load_state_dict(state_dict, strict=False) # restore step-dependent states prev_geometry.do_update_step(epoch, global_step, on_load_weights=True) # convert from coarse stage geometry prev_geometry = prev_geometry.to(get_device()) self.geometry = threestudio.find(self.cfg.geometry_type).create_from( prev_geometry, self.cfg.geometry, copy_net=self.cfg.geometry_convert_inherit_texture, ) del prev_geometry cleanup() else: self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry) self.material = threestudio.find(self.cfg.material_type)(self.cfg.material) self.background = threestudio.find(self.cfg.background_type)( self.cfg.background ) self.renderer = threestudio.find(self.cfg.renderer_type)( self.cfg.renderer, geometry=self.geometry, material=self.material, background=self.background, ) def on_fit_start(self) -> None: if self._save_dir is not None: threestudio.info(f"Validation results will be saved to {self._save_dir}") else: threestudio.warn( f"Saving directory not set for the system, visualization results will not be saved" ) def on_test_end(self) -> None: if self._save_dir is not None: threestudio.info(f"Test results saved to {self._save_dir}") def on_predict_start(self) -> None: self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)( self.cfg.exporter, geometry=self.geometry, material=self.material, background=self.background, ) def predict_step(self, batch, batch_idx): if self.exporter.cfg.save_video: self.test_step(batch, batch_idx) def 
on_predict_epoch_end(self) -> None: if self.exporter.cfg.save_video: self.on_test_epoch_end()
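Editor's note: the geometry-conversion branch in the cropped code above loads only the "geometry" submodule from a previous checkpoint through threestudio's load_module_weights and then calls load_state_dict(strict=False). As an assumption of what such prefix-filtered loading generally looks like (not the helper's actual implementation), a plain-PyTorch sketch:

import torch

def load_submodule_weights(ckpt_path, module_name, map_location="cpu"):
    # read a Lightning-style checkpoint and keep only the keys that belong to
    # the requested submodule, stripping its prefix so the target module can
    # consume the dict directly
    ckpt = torch.load(ckpt_path, map_location=map_location)
    state_dict = ckpt.get("state_dict", ckpt)
    prefix = module_name + "."
    filtered = {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}
    return filtered, ckpt.get("epoch", 0), ckpt.get("global_step", 0)

# usage mirroring the snippet above (paths and module are placeholders):
# state_dict, epoch, global_step = load_submodule_weights("last.ckpt", "geometry")
# prev_geometry.load_state_dict(state_dict, strict=False)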
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, resumed=False) -> None: super().__init__() self.cfg = parse_structured(self.Config, cfg) self._save_dir: Optional[str] = None self._resumed: bool = resumed self._resumed_eval: bool = False self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0} if "loggers" in cfg: self.create_loggers(cfg.loggers) self.configure() if self.cfg.weights is not None: self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules) self.post_configure() def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None): state_dict, epoch, global_step = load_module_weights( weights, ignore_modules=ignore_modules, map_location="cpu" ) self.load_state_dict(state_dict, strict=False) # restore step-dependent states self.do_update_step(epoch, global_step, on_load_weights=True) def set_resume_status(self, current_epoch: int, global_step: int): # restore correct epoch and global step in eval self._resumed_eval = True self._resumed_eval_status["current_epoch"] = current_epoch self._resumed_eval_status["global_step"] = global_step @property def resumed(self): # whether from resumed checkpoint return self._resumed @property def true_global_step(self): if self._resumed_eval: return self._resumed_eval_status["global_step"] else: return self.global_step @property def true_current_epoch(self): if self._resumed_eval: return self._resumed_eval_status["current_epoch"] else: return self.current_epoch def configure(self) -> None: pass def post_configure(self) -> None: """ executed after weights are loaded """ pass def C(self, value: Any) -> float: return C(value, self.true_current_epoch, self.true_global_step) def configure_optimizers(self): optim = parse_optimizer(self.cfg.optimizer, self) ret = { "optimizer": optim, } if self.cfg.scheduler is not None: ret.update( { "lr_scheduler": parse_scheduler(self.cfg.scheduler, optim), } ) return ret def training_step(self, batch, batch_idx): raise NotImplementedError def validation_step(self, batch, batch_idx): raise NotImplementedError def on_train_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.train_dataloader.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) def on_validation_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.val_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_validation_step: # cleanup to save vram cleanup() def on_validation_epoch_end(self): raise NotImplementedError def test_step(self, batch, batch_idx): raise NotImplementedError def on_test_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.test_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def 
on_test_epoch_end(self): pass def predict_step(self, batch, batch_idx): raise NotImplementedError def on_predict_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.predict_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def on_predict_epoch_end(self): pass def preprocess_data(self, batch, stage): pass """ Implementing on_after_batch_transfer of DataModule does the same. But on_after_batch_transfer does not support DP. """ def on_train_batch_start(self, batch, batch_idx, unused=0): self.preprocess_data(batch, "train") self.dataset = self.trainer.train_dataloader.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "validation") self.dataset = self.trainer.val_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "test") self.dataset = self.trainer.test_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "predict") self.dataset = self.trainer.predict_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False): pass def on_before_optimizer_step(self, optimizer): """ # some gradient-related debugging goes here, example: from lightning.pytorch.utilities import grad_norm norms = grad_norm(self.geometry, norm_type=2) print(norms) """ pass class BaseLift3DSystem(BaseSystem): @dataclass class Config(BaseSystem.Config): geometry_type: str = "" geometry: dict = field(default_factory=dict) geometry_convert_from: Optional[str] = None geometry_convert_inherit_texture: bool = False # used to override configurations of the previous geometry being converted from, # for example isosurface_threshold geometry_convert_override: dict = field(default_factory=dict) material_type: str = "" material: dict = field(default_factory=dict) background_type: str = "" background: dict = field(default_factory=dict) renderer_type: str = "" renderer: dict = field(default_factory=dict) guidance_type: str = "" guidance: dict = field(default_factory=dict) prompt_processor_type: str = "" prompt_processor: dict = field(default_factory=dict) # geometry export configurations, no need to specify in training exporter_type: str = "mesh-exporter" exporter: dict = field(default_factory=dict) cfg: Config def configure(self) -> None: self.cfg.geometry_convert_from = find_last_path(self.cfg.geometry_convert_from) self.cfg.weights = find_last_path(self.cfg.weights) if ( self.cfg.geometry_convert_from # from_coarse must be specified and not self.cfg.weights # not initialized from coarse when weights are specified and not self.resumed # not initialized from coarse when resumed from checkpoints ): 
threestudio.info("Initializing geometry from a given checkpoint ...") prev_cfg = load_config( os.path.join( os.path.dirname(self.cfg.geometry_convert_from), "../configs/parsed.yaml", ) ) # TODO: hard-coded relative path prev_system_cfg: BaseLift3DSystem.Config = parse_structured( self.Config, prev_cfg.system ) prev_geometry_cfg = prev_system_cfg.geometry prev_geometry_cfg.update(self.cfg.geometry_convert_override) prev_geometry = threestudio.find(prev_system_cfg.geometry_type)( prev_geometry_cfg ) state_dict, epoch, global_step = load_module_weights( self.cfg.geometry_convert_from, module_name="geometry", map_location="cpu", ) prev_geometry.load_state_dict(state_dict, strict=False) # restore step-dependent states prev_geometry.do_update_step(epoch, global_step, on_load_weights=True) # convert from coarse stage geometry prev_geometry = prev_geometry.to(get_device()) self.geometry = threestudio.find(self.cfg.geometry_type).create_from( prev_geometry, self.cfg.geometry, copy_net=self.cfg.geometry_convert_inherit_texture, ) del prev_geometry cleanup() else: self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry) self.material = threestudio.find(self.cfg.material_type)(self.cfg.material) self.background = threestudio.find(self.cfg.background_type)( self.cfg.background ) self.renderer = threestudio.find(self.cfg.renderer_type)( self.cfg.renderer, geometry=self.geometry, material=self.material, background=self.background, ) def on_fit_start(self) -> None: if self._save_dir is not None: threestudio.info(f"Validation results will be saved to {self._save_dir}") else: threestudio.warn( f"Saving directory not set for the system, visualization results will not be saved" ) def on_test_end(self) -> None: if self._save_dir is not None: threestudio.info(f"Test results saved to {self._save_dir}") def on_predict_start(self) -> None: self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)( self.cfg.exporter, geometry=self.geometry, material=self.material, background=self.background, ) def predict_step(self, batch, batch_idx): if self.exporter.cfg.save_video: self.test_step(batch, batch_idx) def on_predict_epoch_end(self) -> None: if self.exporter.cfg.save_video: self.on_test_epoch_end()
exporter_output: List[ExporterOutput] = self.exporter()
1
2023-12-27 20:30:33+00:00
16k
gardenifi/server
app/raspi/mqtt.py
[ { "identifier": "Services", "path": "app/raspi/services.py", "snippet": "class Services:\n \"\"\"\n The `Services` class provides various methods for managing and controlling\n services related to a Raspberry Pi device, such as turning on/off valves,\n storing and deleting program cycles, loading program cycles, discovering\n WiFi networks, and saving WiFi network configurations.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self._scheduler = BackgroundScheduler()\n self._scheduler_started = False\n\n @property\n def scheduler_started(self):\n \"\"\"getter\"\"\"\n return self._scheduler_started\n\n @scheduler_started.setter\n def scheduler_started(self, value):\n \"\"\"setter\"\"\"\n self._scheduler_started = value\n\n @property\n def scheduler(self):\n \"\"\"getter\"\"\"\n return self._scheduler\n\n @scheduler.setter\n def scheduler(self, value):\n \"\"\"setter\"\"\"\n self._scheduler = value\n\n def turn_on_from_program(self, valve):\n \"\"\"\n Turn on a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(2, \"out\" + str(valve))\n\n def turn_off_from_program(self, valve):\n \"\"\"\n Turn off a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(0, \"out\" + str(valve))\n\n def get_stop_datetime(self, day, start_hour, start_min, period):\n \"\"\"\n Calculate the stop time for a program cycle.\n\n Parameters:\n - day (str): The day of the week.\n - start_hour (int): The starting hour.\n - start_min (int): The starting minute.\n - period (int): The duration of the cycle in minutes.\n\n Returns:\n tuple: A tuple containing the stop day, stop hour, and stop minute.\n \"\"\"\n logger.debug(f\"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}\")\n stop_day_index = DAYS.index(day)\n logger.debug(f\"stop_day_index {stop_day_index}\")\n\n stop_min = (start_min + period) % 60\n logger.debug(f\"stop_min {stop_min}\")\n\n if stop_min < start_min:\n # should go to the next hour\n stop_hour = (start_hour + 1) % 24\n # should go to the next day\n if stop_hour < start_hour:\n stop_day_index = (stop_day_index + 1) % 7\n else:\n stop_hour = start_hour\n\n logger.debug(f\"stop_hour {stop_hour}\")\n\n stop_day = DAYS[stop_day_index]\n logger.debug(f\"stop_day: {stop_day}\")\n\n return stop_day, stop_hour, stop_min\n\n def store_program_cycles(self, json_data, store=False) -> None:\n \"\"\"\n Store program cycles and schedule them using the scheduler.\n\n Parameters:\n - json_data (dict): JSON data containing program information.\n - store (bool, optional): Whether to store the program information. Default is False.\n\n Returns:\n None\n \"\"\"\n try:\n triggers_to_start = []\n triggers_to_stop = []\n for day in json_data[\"days\"].split(\",\"):\n if day not in DAYS:\n raise DayValueException(f\"{day} is not correct! 
Accepted values: {DAYS}\")\n for cycle in json_data[\"cycles\"]:\n logger.info(f\"Cycle: {cycle}\")\n if int(cycle[\"min\"]) <= 0:\n logger.info(\"This cycle should not be considered to be in the program due to min <=0.\")\n continue\n start_hour = cycle[\"start\"].split(\":\")[0]\n start_min = cycle[\"start\"].split(\":\")[1]\n\n logger.info(f\"Start: {day} at {start_hour}:{start_min}\")\n triggers_to_start.append(CronTrigger(day_of_week=day, hour=int(start_hour), minute=int(start_min)))\n\n stop_day, stop_hour, stop_min = self.get_stop_datetime(day, int(start_hour), int(start_min), int(cycle[\"min\"]))\n logger.info(f\"Stop: {stop_day} at {stop_hour}:{stop_min}\")\n triggers_to_stop.append(CronTrigger(day_of_week=stop_day, hour=stop_hour, minute=stop_min))\n\n logger.info(f\"FINAL Triggers To Start to be in the program:{triggers_to_start}\")\n logger.info(f\"FINAL Triggers To Stop to be in the program: {triggers_to_stop}\")\n\n self._scheduler.add_job(self.turn_on_from_program, OrTrigger(triggers_to_start), args=[json_data[\"out\"]])\n self._scheduler.add_job(self.turn_off_from_program, OrTrigger(triggers_to_stop), args=[json_data[\"out\"]])\n\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n\n if store is True:\n file_path = PROGRAM + str(json_data[\"out\"]) + PROGRAM_EXT\n with open(file_path, \"w\", encoding=\"utf-8\") as outfile:\n json.dump(json_data, outfile)\n outfile.close()\n\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def delete_program(self, valve) -> bool:\n \"\"\"\n Delete a stored program for a specific valve.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n bool: True if the program was deleted, False otherwise.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Looking for {file_path} to delete!\")\n if path.exists(file_path):\n logger.info(f\"{file_path} exists! 
Deleting it...\")\n remove(file_path)\n return True\n return False\n\n def load_program_cycles_if_exists(self, valve):\n \"\"\"\n Load program cycles for a valve if a stored program exists.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n dict or None: The loaded JSON data or None if no program exists.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Loading {file_path} if exists!\")\n json_data = None\n if path.exists(file_path):\n logger.info(f\"{file_path} exists!\")\n with open(file_path, encoding=\"utf-8\") as json_file:\n json_data = json.load(json_file)\n self.store_program_cycles(json_data)\n json_file.close()\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n return json_data\n\n def split_json_into_chunks(self, selected_page, ap_array):\n \"\"\"\n Split a JSON array into chunks and create a response JSON.\n\n Parameters:\n - selected_page (int): The requested page number.\n - ap_array (list): The array to be split.\n\n Returns:\n dict: The response JSON containing the specified page and network information.\n \"\"\"\n selected_page = int(selected_page)\n json_response = {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"page\": selected_page,\n \"nets\": {},\n \"pages\": 0,\n }\n json_response_to_send = json_response.copy()\n\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n logger.debug(f\"Initial JSON response headers size: {headers_size} bytes\")\n\n pages = 1\n current_chunk_size = headers_size\n json_array = []\n\n for item in ap_array:\n json_response[\"pages\"] = pages\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n item_size = len(json.dumps(item).encode(\"utf-8\"))\n logger.debug(\n \"JSON item size: \"\n + f\"{item_size} bytes, \"\n + \"current_chunk_size: \"\n + f\"{current_chunk_size} bytes, \"\n + \"total: \"\n + f\"{current_chunk_size + item_size} bytes\"\n )\n if current_chunk_size + item_size >= MAX_NUM_OF_BYTES_CHUNK - MAX_NUM_OF_BUFFER_TO_ADD:\n pages += 1\n json_response[\"pages\"] = pages\n json_array = [item]\n json_response[\"nets\"] = json_array\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n current_chunk_size = headers_size + item_size + len(\", \")\n logger.debug(\n f\"Found total >= {MAX_NUM_OF_BYTES_CHUNK}: \"\n f\"Creating a new page: {pages}. \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n else:\n json_array.append(item)\n current_chunk_size += item_size + len(\", \")\n if selected_page == pages:\n json_response_to_send[\"nets\"] = json_array\n\n json_response_to_send[\"pages\"] = pages\n logger.debug(f\"JSON response size: {headers_size}\")\n logger.debug(\n f\"Nets array for this page ({pages}): {json_array}. \"\n f\"Current nets array size: {len(json.dumps(json_array).encode('utf-8'))} bytes, \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n\n if not json_response[\"nets\"]:\n json_response_to_send[\"nets\"] = json_array\n\n logger.debug(f\"JSON total size: {len(json.dumps(json_response_to_send).encode('utf-8'))}\")\n return json_response_to_send\n\n def discover_wifi_networks(self, chunked=0, page=1, refresh_networks_file=False):\n \"\"\"\n Discover available WiFi networks and return the information.\n\n Parameters:\n - chunked (int, optional): Whether to split the response into chunks. Default is 0.\n - page (int, optional): The requested page number. 
Default is 1.\n - refresh_networks_file (bool, optional): Whether to refresh the networks file. Default is False.\n\n Returns:\n str or dict: The JSON response containing WiFi network information.\n \"\"\"\n try:\n if page > 1:\n refresh_networks_file = False\n json_response = {}\n ap_array = []\n retries = 0\n while retries < 30:\n retries = retries + 1\n ap_array = Helpers().scan_rpi_wifi_networks(refresh_networks_file)\n if len(ap_array) != 0:\n break\n\n json_response = json.dumps(\n {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"ap_array\": ap_array,\n }\n )\n\n logger.info(f\"json_response: {json_response}\")\n if chunked == 0:\n return json_response\n logger.info(f\"Split array into chunks of {MAX_NUM_OF_BYTES_CHUNK} bytes...\")\n json_response = self.split_json_into_chunks(page, ap_array)\n return json_response\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network(self, ssid, wifi_key):\n \"\"\"\n Save WiFi network information.\n\n Parameters:\n - request_data (dict): The request data containing WiFi network information.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n if ssid and wifi_key:\n Helpers().store_wpa_ssid_key(ssid, wifi_key)\n return \"OK\"\n raise ValueError(\"Error: You need to provide ssid and wifi_keys in POST data\")\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network_with_wpa(self, wpa_enabled, wpa_key):\n \"\"\"\n Save WiFi network information with WPA settings.\n\n Parameters:\n - request_params (dict): The request parameters containing WPA settings.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n logger.info(f\"wpa_enabled: {wpa_enabled}, wpa_key: {wpa_key}\")\n if str(wpa_enabled) == \"1\":\n Helpers().update_wpa_supplicant(1, wpa_key)\n else:\n Helpers().update_wpa_supplicant(0, wpa_key)\n\n thread = Thread(target=Helpers().sleep_and_reboot_for_wpa)\n thread.start()\n return \"OK\"\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise" }, { "identifier": "MQTT_CLIENT_ID", "path": "app/raspi/const.py", "snippet": "MQTT_CLIENT_ID = \"RaspirriV1-MQTT-Client\" + str(uuid.uuid4())" }, { "identifier": "MQTT_TOPIC_STATUS", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_STATUS = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_STATUS\", \"/status\")" }, { "identifier": "MQTT_TOPIC_METADATA", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_METADATA = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_METADATA\", \"/metadata\")" }, { "identifier": "MQTT_TOPIC_CONFIG", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_CONFIG = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_CONFIG\", \"/config\")" }, { "identifier": "MQTT_TOPIC_CMD", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_CMD = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_CMD\", \"/command\")" }, { "identifier": "MQTT_TOPIC_VALVES", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_VALVES = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_VALVES\", \"/valves\")" }, { "identifier": "MQTT_STATUS_ERR", "path": "app/raspi/const.py", "snippet": "MQTT_STATUS_ERR = '{\"sts\": 1, \"err\": '" }, { "identifier": "PROGRAM", "path": 
"app/raspi/const.py", "snippet": "PROGRAM = \"program_\"" }, { "identifier": "PROGRAM_EXT", "path": "app/raspi/const.py", "snippet": "PROGRAM_EXT = \".json\"" }, { "identifier": "MQTT_STATUS_OK", "path": "app/raspi/const.py", "snippet": "MQTT_STATUS_OK = '{\"sts\": 0, \"res\": '" }, { "identifier": "MQTT_OK", "path": "app/raspi/const.py", "snippet": "MQTT_OK = '\"OK\"'" }, { "identifier": "MQTT_END", "path": "app/raspi/const.py", "snippet": "MQTT_END = \"}\"" }, { "identifier": "MQTT_USER", "path": "app/raspi/const.py", "snippet": "MQTT_USER = load_env_variable(\"MQTT_USER\", \"user\")" }, { "identifier": "MQTT_PASS", "path": "app/raspi/const.py", "snippet": "MQTT_PASS = load_env_variable(\"MQTT_PASS\", \"pass\")" }, { "identifier": "MQTT_HOST", "path": "app/raspi/const.py", "snippet": "MQTT_HOST = load_env_variable(\"MQTT_HOST\", \"localhost\")" }, { "identifier": "MQTT_PORT", "path": "app/raspi/const.py", "snippet": "MQTT_PORT = load_env_variable(\"MQTT_PORT\", \"1883\")" }, { "identifier": "Helpers", "path": "app/raspi/helpers.py", "snippet": "class Helpers:\n \"\"\"\n The `Helpers` class provides various helper methods for performing tasks\n such as setting valves, getting system information, storing and loading\n objects to/from files, managing WiFi networks, and updating the `wpa_supplicant.conf` file.\n \"\"\"\n\n __instance = None\n __lock = threading.Lock()\n\n def __new__(cls):\n \"\"\"\n Create a new instance of the Helpers class using the singleton design pattern.\n\n Returns:\n An instance of the Helpers class.\n\n Example Usage:\n instance = Helpers()\n \"\"\"\n if cls.__instance is None:\n with cls.__lock:\n cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n return cls.__instance\n\n @classmethod\n def destroy_instance(cls):\n \"\"\"\n Destroy the instance of the Helpers class.\n\n This method sets the instance of the Helpers class to None, effectively destroying the instance.\n\n Example Usage:\n ```python\n instance = Helpers() # Create an instance of the Helpers class\n Helpers.destroy_instance() # Destroy the instance\n print(instance) # Output: None\n ```\n\n Inputs:\n None\n\n Outputs:\n None\n \"\"\"\n cls.__instance = None\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n\n @property\n def toggle_statuses(self):\n \"\"\"\n Getter method for the toggle_statuses property.\n\n Returns:\n dict: A dictionary containing toggle statuses.\n\n Example:\n Access toggle statuses using `instance.toggle_statuses`.\n \"\"\"\n return self._toggle_statuses\n\n @toggle_statuses.setter\n def toggle_statuses(self, value):\n \"\"\"\n Setter method for the toggle_statuses property.\n\n Args:\n value (dict): A dictionary containing toggle statuses to set.\n\n Example:\n Set toggle statuses using `instance.toggle_statuses = new_statuses`.\n \"\"\"\n self._toggle_statuses = value\n\n @property\n def ap_array(self):\n \"\"\"\n Getter method for the _ap_array property.\n\n Returns:\n An array of wifi networks\n\n Example:\n Access toggle statuses using `instance.ap_array`.\n \"\"\"\n return self._ap_array\n\n @ap_array.setter\n def ap_array(self, value):\n \"\"\"\n Setter method for the _ap_array property.\n\n Args:\n value (dict): An array containing the wifi networks to set.\n\n Example:\n Set toggle statuses using `instance.ap_array = new_ap_array`.\n \"\"\"\n self._ap_array = value\n\n def set_valves(self, valves):\n \"\"\"\n Set valve statuses in 
the toggle_statuses dictionary.\n\n Args:\n valves (str or dict): A string or dictionary representing valve statuses.\n\n Example:\n instance.set_valves('{\"valve1\": true, \"valve2\": false}')\n \"\"\"\n try:\n if isinstance(valves, str):\n valves = ast.literal_eval(valves)\n else:\n valves = ast.literal_eval(str(valves))\n self._toggle_statuses[\"valves\"] = valves\n except Exception as exception:\n logger.error(f\"Error in set_valves: {exception}\")\n raise\n\n def extract_local_ip(self):\n \"\"\"\n Extract the local IP address of the device.\n\n Returns:\n str: The local IP address.\n\n Example:\n local_ip = instance.extract_local_ip()\n \"\"\"\n tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n tcp_sock.connect((\"8.8.8.8\", 1))\n ip_address = tcp_sock.getsockname()[0]\n except Exception:\n ip_address = \"127.0.0.1\"\n finally:\n tcp_sock.close()\n return ip_address\n\n def get_uptime(self):\n \"\"\"\n Get the system uptime.\n\n Returns:\n str: The system uptime.\n\n Example:\n uptime = instance.get_uptime()\n \"\"\"\n try:\n result = subprocess.run([\"uptime\", \"-p\"], stdout=subprocess.PIPE, text=True, check=True)\n return result.stdout.replace(\"\\n\", \"\")\n except Exception as e:\n logger.error(f\"Error retrieving uptime: {e}\")\n return str(e)\n\n def get_git_commit_id(self):\n \"\"\"\n Get the Git commit ID of the current project.\n\n Returns:\n str: The Git commit ID.\n\n Example:\n commit_id = instance.get_git_commit_id()\n \"\"\"\n # Specify the file path\n file_path = \"app/git_commit_id.txt\"\n\n # Open the file in read mode ('r')\n try:\n with open(file_path, encoding=\"utf-8\") as file:\n # Read the entire content of the file\n content = file.read().replace(\"\\n\", \"\")\n logger.debug(f\"File content: {content}\")\n return content\n except FileNotFoundError as e:\n logger.error(f\"The file '{file_path}' does not exist.\")\n return str(e)\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Error retrieving git log: {e}\")\n return str(e)\n\n def store_object_to_file(self, filename, local_object):\n \"\"\"\n Store a local object to a file using pickle.\n\n Args:\n filename (str): The name of the file to store the object.\n local_object (object): The object to be stored.\n\n Example:\n instance.store_object_to_file('data.pkl', data)\n \"\"\"\n try:\n with open(filename, \"wb\") as obj_file:\n pickle.dump(local_object, obj_file)\n logger.info(f\"Stored local object file: {filename}: {local_object}\")\n obj_file.close()\n return local_object\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def store_toggle_statuses_to_file(self):\n \"\"\"\n Store toggle statuses to a file.\n\n Returns:\n dict: The toggle statuses being stored.\n\n Example:\n stored_statuses = instance.store_toggle_statuses_to_file()\n \"\"\"\n return self.store_object_to_file(STATUSES_FILE, self._toggle_statuses)\n\n def store_wifi_networks_to_file(self):\n \"\"\"\n Store WiFi networks to a file.\n\n Returns:\n list: The WiFi networks being stored.\n\n Example:\n stored_networks = instance.store_wifi_networks_to_file()\n \"\"\"\n return self.store_object_to_file(NETWORKS_FILE, self._ap_array)\n\n def load_object_from_file(self, filename):\n \"\"\"\n Load a local object from a file using pickle.\n\n Args:\n filename (str): The name of the file to load the object from.\n\n Returns:\n object: The loaded object.\n\n Example:\n loaded_object = instance.load_object_from_file('data.pkl')\n \"\"\"\n try:\n local_obj = {}\n with open(filename, 
\"rb\") as obj_file:\n local_obj = pickle.load(obj_file)\n logger.info(f\"Loaded local object file: {filename}: {local_obj}\")\n obj_file.close()\n return local_obj\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n self.store_object_to_file(filename, local_obj)\n return local_obj\n\n def load_toggle_statuses_from_file(self):\n \"\"\"\n Load toggle statuses from a file and update the instance's _toggle_statuses attribute.\n \"\"\"\n self._toggle_statuses = self.load_object_from_file(STATUSES_FILE)\n\n def load_wifi_networks_from_file(self):\n \"\"\"\n Load WiFi networks from a file and update the instance's _ap_array attribute.\n \"\"\"\n self._ap_array = self.load_object_from_file(NETWORKS_FILE)\n\n def get_timezone(self):\n \"\"\"\n Get the system timezone.\n\n Returns:\n str: The system timezone.\n\n Example:\n timezone = instance.get_timezone()\n \"\"\"\n return str(time.tzname[time.daylight])\n\n def check_empty_toggle(self, valve):\n \"\"\"\n Check if a toggle status is empty for a specific valve and set a default value if it is.\n\n Args:\n valve (str): The name of the valve.\n\n Example:\n instance.check_empty_toggle(\"out1\")\n \"\"\"\n if self._toggle_statuses.get(valve) is None:\n self._toggle_statuses[valve] = 0\n self._toggle_statuses[valve] = self.set_gpio_outputs(self._toggle_statuses[valve], valve)\n\n def get_toggle_statuses(self):\n \"\"\"\n Get and update toggle statuses, system information, and store them to a file.\n\n Returns:\n dict: The updated toggle statuses.\n\n Example:\n updated_statuses = instance.get_toggle_statuses()\n \"\"\"\n if \"valves\" not in self._toggle_statuses:\n self.set_valves([])\n\n self.check_empty_toggle(\"out1\")\n self.check_empty_toggle(\"out2\")\n self.check_empty_toggle(\"out3\")\n self.check_empty_toggle(\"out4\")\n\n self._toggle_statuses[\"server_time\"] = str(datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n self._toggle_statuses[\"tz\"] = self.get_timezone()\n self._toggle_statuses[\"hw_id\"] = RPI_HW_ID\n\n logger.info(f\"Valves statuses:{self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n\n return self._toggle_statuses\n\n def set_gpio_outputs(self, status, valve):\n \"\"\"\n Set GPIO outputs for a specified valve.\n\n Args:\n status (int): The status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n int: The modified status.\n\n Example:\n modified_status = instance.set_gpio_outputs(1, \"out1\")\n \"\"\"\n status = bool(status in (1, 2))\n logger.info(f\"Set Output of Valve: {valve}::{status}\")\n if ARCH == \"arm\":\n if valve == \"out2\":\n logger.info(f\"===========> Setting PIN 11 GPIO.output...{status}\")\n # RuntimeError: Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)\n GPIO.output(11, status)\n logger.info(f\"===========> PIN 11 Status GPIO.input: {GPIO.input(11)}\")\n return 1 if status is True else 0\n\n def toggle(self, status, valve):\n \"\"\"\n Toggle a valve, set GPIO outputs, update toggle statuses, and store them to a file.\n\n Args:\n status (int): The new status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n str: A confirmation message.\n\n Example:\n confirmation = instance.toggle(1, \"out1\")\n \"\"\"\n status = self.set_gpio_outputs(status, valve)\n self._toggle_statuses[valve] = status\n logger.info(f\"Modified valves statuses: {self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n return \"OK\"\n\n @property\n def is_connected_to_inet(self):\n \"\"\"\n Get the current 
internet connection status.\n\n Returns:\n bool: True if connected, False otherwise.\n\n Example:\n connection_status = instance.is_connected_to_inet()\n \"\"\"\n return self._is_connected_to_inet\n\n @is_connected_to_inet.setter\n def is_connected_to_inet(self, value):\n \"\"\"\n Set the current internet connection status.\n\n Returns:\n None\n\n Example:\n instance.is_connected_to_inet = connection_status\n \"\"\"\n self._is_connected_to_inet = value\n\n def system_reboot(self):\n \"\"\"\n Reboot the system after a 2-second delay.\n \"\"\"\n logger.info(\"Rebooting in 2 seconds...\")\n time.sleep(2)\n try:\n subprocess.run([\"reboot\"], stdout=subprocess.PIPE, text=True, check=True)\n except Exception as e:\n logger.error(f\"Error rebooting: {e}\")\n\n def system_update(self):\n \"\"\"\n Update the system through git.\n \"\"\"\n logger.info(\"Git update code and restart...\")\n try:\n subprocess.run([\"/usr/bin/git\", \"pull\"], stdout=subprocess.PIPE, text=True, check=True)\n os.kill(os.getpid(), signal.SIGTERM)\n except Exception as e:\n logger.error(f\"Error updating git: {e}\")\n\n def checking_for_duplicate_ssids(self, ssid, ap_array):\n \"\"\"\n Check for duplicate SSIDs in the list of WiFi networks.\n\n Args:\n ssid (str): The SSID to check.\n ap_array (list): The list of WiFi networks.\n\n Returns:\n bool: True if a duplicate is found, False otherwise.\n\n Example:\n is_duplicate = instance.checking_for_duplicate_ssids(\"MyWiFi\", wifi_networks)\n \"\"\"\n for wifi in ap_array:\n if wifi[\"ssid\"] == ssid:\n return True\n return False\n\n def scan_rpi_wifi_networks(self, refresh=False):\n \"\"\"\n Scan for available WiFi networks and update the instance's _ap_array attribute.\n\n Args:\n refresh (bool): If True, force a refresh of the WiFi networks list.\n\n Returns:\n list: The updated list of WiFi networks.\n\n Example:\n wifi_networks = instance.scan_rpi_wifi_networks()\n \"\"\"\n self._ap_array = []\n index = 0\n if not os.path.exists(NETWORKS_FILE):\n refresh = True\n if refresh:\n if ARCH == \"arm\":\n with subprocess.Popen([\"iwlist\", \"scan\"], stdout=subprocess.PIPE) as iwlist_raw:\n ap_list, err = iwlist_raw.communicate()\n if err is not None:\n logger.error(f\"Popen error: {err}\")\n return self._ap_array\n logger.debug(f\"iwlist scan command output: {ap_list}\")\n for line in ap_list.decode(\"utf-8\").rsplit(\"\\n\"):\n logger.debug(f\"Line: {line}\")\n if \"ESSID\" in line:\n ap_ssid = line[27:-1]\n if ap_ssid != \"\" and not self.checking_for_duplicate_ssids(ap_ssid, self._ap_array):\n index += 1\n logger.info(f\"id = {index}, ssid = {ap_ssid}\")\n wifi_network = {\"id\": index, \"ssid\": str(ap_ssid)}\n self._ap_array.append(json.loads(json.dumps(wifi_network)))\n self.store_wifi_networks_to_file()\n else:\n self._ap_array = []\n else:\n self.load_wifi_networks_from_file()\n\n return self._ap_array\n\n def store_wpa_ssid_key(self, ssid, wifi_key):\n \"\"\"\n Store the WPA SSID and key, and update the WPA supplicant configuration.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if the update is successful, False otherwise.\n\n Example:\n success = instance.store_wpa_ssid_key(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n logger.info(f\"ssid: {ssid}, wifi_key: {wifi_key}\")\n return self.update_wpa_supplicant(ssid, wifi_key)\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def is_raspberry_pi_zero(self):\n \"\"\"\n Check whether we're hosted in 
an RPi Zero or not.\n \"\"\"\n try:\n with open(\"/proc/cpuinfo\", encoding=\"utf8\") as cpuinfo:\n for line in cpuinfo:\n if line.startswith(\"Model\"):\n model_info = line.strip().split(\":\")\n model_name = model_info[1].strip()\n return \"Raspberry Pi Zero\" in model_name\n return False\n except FileNotFoundError as fnfex:\n logger.error(f\"Error: {fnfex}\")\n return False\n\n def write_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Write the WPA supplicant configuration to a temporary file.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n \"\"\"\n with open(WPA_SUPL_CONF_TMP, \"w\", encoding=\"utf8\") as temp_conf_file:\n temp_conf_file.write(\"ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\\n\")\n temp_conf_file.write(\"update_config=1\\n\")\n temp_conf_file.write(\"\\n\")\n temp_conf_file.write(\"network={\\n\")\n temp_conf_file.write('\tssid=\"' + str(ssid) + '\"\\n')\n if wifi_key == \"\":\n temp_conf_file.write(\"\tkey_mgmt=NONE\\n\")\n else:\n temp_conf_file.write('\tpsk=\"' + str(wifi_key) + '\"\\n')\n temp_conf_file.write(\"}\\n\")\n temp_conf_file.close()\n\n def get_wireless_interface(self):\n \"\"\"\n Get the wireless interface name of the device.\n\n Returns:\n str: The wireless interface name.\n\n Example:\n interface_name = instance.get_wireless_interface()\n \"\"\"\n try:\n ifconfig_output = subprocess.check_output([\"ifconfig\"]).decode(\"utf-8\")\n wireless_interfaces = re.findall(r\"wlan[0-9]+\", ifconfig_output)\n if wireless_interfaces:\n return wireless_interfaces[0]\n except subprocess.CalledProcessError as ex:\n logger.error(f\"Error: {ex}\")\n raise\n return None\n\n def update_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Update the WPA supplicant configuration and check for internet connectivity.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if connected to the internet after the update, False otherwise.\n\n Example:\n connected = instance.update_wpa_supplicant(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n self._is_connected_to_inet = False\n if RUNNING_UNIT_TESTS and ssid == DUMMY_SSID and wifi_key == DUMMY_PASSKEY:\n return True\n # In case of Raspberry Pi Zero NetworkManager stucks. 
So let's go with the wap_supplicant\n # modification approach.\n if self.is_raspberry_pi_zero():\n self.write_wpa_supplicant(ssid, wifi_key)\n os.system(\n \"cp /etc/wpa_supplicant/wpa_supplicant.conf \\\n /etc/wpa_supplicant/wpa_supplicant.conf.bak\"\n )\n os.system(\"cp \" + WPA_SUPL_CONF_TMP + \" /etc/wpa_supplicant/wpa_supplicant.conf\")\n wpa_cli_cmd = \"sudo wpa_cli -i wlan0 reconfigure\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command {wpa_cli_cmd}:{output.decode('utf8')}\")\n else:\n wpa_cli_cmd = f\"sudo nmcli device wifi connect {ssid} password {wifi_key}\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}:{output.decode('utf8')}`\")\n\n wireless_interface = self.get_wireless_interface()\n logger.info(f\"wireless_interface `{wireless_interface}`\")\n wpa_cli_cmd = f\"wpa_cli -i {wireless_interface} status | grep state | cut -d'=' -f2\"\n logger.info(f\"Command to run: `{wpa_cli_cmd}`\")\n retries = 0\n while retries < 30:\n retries = retries + 1\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}`:{output.decode('utf8')}\")\n if str(output.decode(\"utf8\")) == \"COMPLETED\\n\":\n self._is_connected_to_inet = True\n else:\n time.sleep(2)\n\n logger.info(f\"Connected to internet: {self._is_connected_to_inet}\")\n return self._is_connected_to_inet\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def sleep_and_reboot_for_wpa(self):\n \"\"\"\n Sleep for a short period and then reboot the system.\n \"\"\"\n self.system_reboot()" }, { "identifier": "Command", "path": "app/raspi/const.py", "snippet": "class Command(Enum):\n \"\"\"Supported Commands Enumerator.\"\"\"\n\n TURN_OFF_VALVE = 0\n TURN_ON_VALVE = 1\n SEND_PROGRAM = 2\n SEND_TIMEZONE = 3\n REBOOT_RPI = 4\n DELETE_PROGRAM = 5\n UPDATE_RPI = 6" } ]
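Editor's note: Services.get_stop_datetime in the context above derives the stop slot with modular arithmetic: minutes wrap into the next hour, and hours wrap into the next day. A small worked example of that rollover, assuming a mon..sun ordering for DAYS (the actual constant lives in app/raspi/const.py) and cycle periods under an hour, which is what the single-hour carry handles:

DAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]  # assumed ordering

def stop_datetime(day, start_hour, start_min, period):
    stop_min = (start_min + period) % 60
    stop_hour = start_hour
    stop_day_index = DAYS.index(day)
    if stop_min < start_min:                # minute counter wrapped -> carry one hour
        stop_hour = (start_hour + 1) % 24
        if stop_hour < start_hour:          # hour counter wrapped -> carry one day
            stop_day_index = (stop_day_index + 1) % 7
    return DAYS[stop_day_index], stop_hour, stop_min

print(stop_datetime("sun", 23, 50, 20))     # ('mon', 0, 10): a cycle crossing midnight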
import time import os import json import threading import sys import paho.mqtt.client as mqtt from threading import Thread from loguru import logger from app.raspi.services import Services from app.raspi.const import ( MQTT_CLIENT_ID, MQTT_TOPIC_STATUS, MQTT_TOPIC_METADATA, MQTT_TOPIC_CONFIG, MQTT_TOPIC_CMD, MQTT_TOPIC_VALVES, MQTT_STATUS_ERR, PROGRAM, PROGRAM_EXT, MQTT_STATUS_OK, MQTT_OK, MQTT_END, MQTT_USER, MQTT_PASS, MQTT_HOST, MQTT_PORT, ) from app.raspi.helpers import Helpers from app.raspi.const import Command
11,026
) Mqtt().get_periodic_updates_thread().start() else: logger.info(f"Connect returned result code: {return_code}") @staticmethod def handle_valves(client, data): """Handle valves.""" try: logger.info(f"valves data received={data}") Helpers().set_valves(data) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) # Program Configuration handler # 1. It should parse the configuration as a JSON string # 2. If it is correct it should store it as a local file # 3. A scheduler should launch to turn on the irrigator for every cycle @staticmethod def handle_config(client, data): """Handle cfg.""" try: json_data = json.loads(data) logger.info(f"prestored programs={json_data}") for program in json_data: logger.info(f"program={program}") if program == {}: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) return Services().store_program_cycles(program, True) Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) @staticmethod def handle_command(client, data): """Handle cmd.""" try: json_data = json.loads(data) logger.info(json_data) cmd = json_data["cmd"] command = Command(cmd) try: valve = json_data["out"] except Exception as exception: logger.warning( f"Could not find valve out parameter. \ Will use valve 1: {exception}" ) valve = 1 file_path = PROGRAM + str(valve) + PROGRAM_EXT if command in (Command.TURN_ON_VALVE, Command.TURN_OFF_VALVE): Helpers().toggle(cmd, "out" + str(valve)) statuses = Helpers().get_toggle_statuses() logger.info(f"Publishing right away Statuses to MQTT topic: {MQTT_TOPIC_STATUS}: {statuses}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(statuses)) elif command == Command.SEND_PROGRAM: logger.info(f"Looking for {file_path}") if os.path.exists(file_path): logger.info(f"{file_path} exists!") with open(file_path, encoding="utf-8") as json_file: json_data = json.load(json_file) Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(json_data)) else: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + file_path + " does not exist!" + MQTT_END) elif command == Command.DELETE_PROGRAM: if not Services().delete_program(valve): Mqtt.publish_to_topic( client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + file_path + " does not exist! Cannot be deleted." + MQTT_END ) elif command == Command.SEND_TIMEZONE: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + str(Helpers().get_timezone() + MQTT_END)) elif command == Command.REBOOT_RPI: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) Helpers().system_reboot() elif command == Command.UPDATE_RPI: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) Helpers().system_update() else: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + "Wrong command used!" + MQTT_END) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) @staticmethod def publish_to_topic(client, topic, data, retained=True): """Publish to MQTT Topic.""" client.publish(topic, data, qos=2, retain=retained) # The callback for when a PUBLISH message is received from the server. 
@staticmethod def on_message(client, userdata, msg): """OnMessage handler.""" topic = msg.topic data = msg.payload.decode("utf-8") logger.info(f"Received message from topic:{topic}, userdata:{userdata}, data:{data}") if topic == MQTT_TOPIC_CONFIG: Mqtt.handle_config(client, data) elif msg.topic == MQTT_TOPIC_CMD: Mqtt.handle_command(client, data) elif msg.topic == MQTT_TOPIC_VALVES: Mqtt.handle_valves(client, data) @staticmethod def send_periodic_updates(client): """Send periodic updates.""" while True: try: logger.info("Sending Periodic Updates to status topic every 10s...") statuses = Helpers().get_toggle_statuses() logger.info(f"Publishing Statuses to MQTT topic: {MQTT_TOPIC_STATUS}: {statuses}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(statuses)) metadata = {} metadata["ip_address"] = Helpers().extract_local_ip() metadata["uptime"] = Helpers().get_uptime() metadata["git_commit"] = Helpers().get_git_commit_id()
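Editor's note: the cropped code above dispatches on msg.topic inside on_message and publishes retained QoS-2 status messages back to the broker. Below is a minimal paho-mqtt sketch of that subscribe/dispatch/publish loop; the broker host, port and topic names are placeholders, and the callback signatures assume the paho-mqtt 1.x API that the snippet itself uses.

import paho.mqtt.client as mqtt

BROKER_HOST, BROKER_PORT = "localhost", 1883
TOPIC_CMD, TOPIC_STATUS = "raspirri/command", "raspirri/status"

def on_connect(client, userdata, flags, return_code):
    if return_code == 0:
        client.subscribe(TOPIC_CMD)          # subscribe once the CONNACK arrives

def on_message(client, userdata, msg):
    data = msg.payload.decode("utf-8")
    if msg.topic == TOPIC_CMD:
        # acknowledge, retained so late subscribers still see the last status
        client.publish(TOPIC_STATUS, '{"sts": 0, "res": "OK"}', qos=2, retain=True)

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(BROKER_HOST, BROKER_PORT, keepalive=60)
client.loop_forever()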
"""MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class Mqtt: """MQTT Methods Class.""" __instance = None __lock = threading.Lock() client = None def __new__(cls): """ Create a new instance of the Mqtt class using the singleton design pattern. Returns: An instance of the Mqtt class. Example Usage: instance = Mqtt() """ if cls.__instance is None: with cls.__lock: cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code cls._mqtt_thread = None cls._periodic_updates_thread = None logger.debug(f"Returning Mqtt Object Class: {cls.__instance}") return cls.__instance @classmethod def destroy_instance(cls): """ Destroy the instance of the Mqtt class. This method sets the instance of the Mqtt class to None, effectively destroying the instance. Example Usage: ```python instance = Mqtt() # Create an instance of the Mqtt class Mqtt.destroy_instance() # Destroy the instance print(instance) # Output: None ``` Inputs: None Outputs: None """ logger.debug(f"Destroying Mqtt Object Class: {cls.__instance}") cls.__instance = None cls._mqtt_thread = None cls._periodic_updates_thread = None def get_mqtt_thread(self): """Getter.""" logger.debug(f"Getting current thread: {self._mqtt_thread}") return self._mqtt_thread def set_mqtt_thread(self, mqtt_thread): """Setter.""" logger.debug(f"Setting new thread: {mqtt_thread}") self._mqtt_thread = mqtt_thread def get_periodic_updates_thread(self): """Getter.""" return self._periodic_updates_thread def set_periodic_updates_thread(self, periodic_updates_thread): """Setter.""" self._periodic_updates_thread = periodic_updates_thread def is_running(self): """Check whether mqtt thread state.""" # logger.info(str(mqtt_thread)) # logger.info(str(mqtt_thread is not None)) # logger.info(str(mqtt_thread.is_alive())) return self._mqtt_thread is not None and self._mqtt_thread.is_alive() @staticmethod def on_disconnect(client, data, return_code=0): """OnDisconnect callback.""" logger.debug(f"MQTT OnDisconnect: {client}:{data}:{return_code}") # The callback for when the client # receives a CONNACK response from the server. 
@staticmethod def on_connect(client, userdata, flags, return_code): """OnConnect callback.""" logger.debug(f"MQTT OnConnect: {client}:{userdata}:{flags}:{return_code}") client.connected_flag = True # subscribe to the RASPIRRI TOPICS logger.debug( f"MQTT OnConnect: Subscribing to topics:\ {MQTT_TOPIC_STATUS},\ {MQTT_TOPIC_CONFIG},\ {MQTT_TOPIC_CMD},\ {MQTT_TOPIC_VALVES}" ) client.subscribe(MQTT_TOPIC_STATUS) client.subscribe(MQTT_TOPIC_CONFIG) client.subscribe(MQTT_TOPIC_CMD) client.subscribe(MQTT_TOPIC_VALVES) if return_code == 0: logger.info("Connected successfully") Helpers().load_toggle_statuses_from_file() if Mqtt().get_periodic_updates_thread() is None: Mqtt().set_periodic_updates_thread( Thread(daemon=True, name="PeriodicUpdatesThread", target=Mqtt.send_periodic_updates, args=(client,)) ) Mqtt().get_periodic_updates_thread().start() else: logger.info(f"Connect returned result code: {return_code}") @staticmethod def handle_valves(client, data): """Handle valves.""" try: logger.info(f"valves data received={data}") Helpers().set_valves(data) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) # Program Configuration handler # 1. It should parse the configuration as a JSON string # 2. If it is correct it should store it as a local file # 3. A scheduler should launch to turn on the irrigator for every cycle @staticmethod def handle_config(client, data): """Handle cfg.""" try: json_data = json.loads(data) logger.info(f"prestored programs={json_data}") for program in json_data: logger.info(f"program={program}") if program == {}: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) return Services().store_program_cycles(program, True) Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) @staticmethod def handle_command(client, data): """Handle cmd.""" try: json_data = json.loads(data) logger.info(json_data) cmd = json_data["cmd"] command = Command(cmd) try: valve = json_data["out"] except Exception as exception: logger.warning( f"Could not find valve out parameter. \ Will use valve 1: {exception}" ) valve = 1 file_path = PROGRAM + str(valve) + PROGRAM_EXT if command in (Command.TURN_ON_VALVE, Command.TURN_OFF_VALVE): Helpers().toggle(cmd, "out" + str(valve)) statuses = Helpers().get_toggle_statuses() logger.info(f"Publishing right away Statuses to MQTT topic: {MQTT_TOPIC_STATUS}: {statuses}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(statuses)) elif command == Command.SEND_PROGRAM: logger.info(f"Looking for {file_path}") if os.path.exists(file_path): logger.info(f"{file_path} exists!") with open(file_path, encoding="utf-8") as json_file: json_data = json.load(json_file) Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(json_data)) else: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + file_path + " does not exist!" + MQTT_END) elif command == Command.DELETE_PROGRAM: if not Services().delete_program(valve): Mqtt.publish_to_topic( client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + file_path + " does not exist! Cannot be deleted." 
+ MQTT_END ) elif command == Command.SEND_TIMEZONE: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + str(Helpers().get_timezone() + MQTT_END)) elif command == Command.REBOOT_RPI: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) Helpers().system_reboot() elif command == Command.UPDATE_RPI: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) Helpers().system_update() else: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + "Wrong command used!" + MQTT_END) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) @staticmethod def publish_to_topic(client, topic, data, retained=True): """Publish to MQTT Topic.""" client.publish(topic, data, qos=2, retain=retained) # The callback for when a PUBLISH message is received from the server. @staticmethod def on_message(client, userdata, msg): """OnMessage handler.""" topic = msg.topic data = msg.payload.decode("utf-8") logger.info(f"Received message from topic:{topic}, userdata:{userdata}, data:{data}") if topic == MQTT_TOPIC_CONFIG: Mqtt.handle_config(client, data) elif msg.topic == MQTT_TOPIC_CMD: Mqtt.handle_command(client, data) elif msg.topic == MQTT_TOPIC_VALVES: Mqtt.handle_valves(client, data) @staticmethod def send_periodic_updates(client): """Send periodic updates.""" while True: try: logger.info("Sending Periodic Updates to status topic every 10s...") statuses = Helpers().get_toggle_statuses() logger.info(f"Publishing Statuses to MQTT topic: {MQTT_TOPIC_STATUS}: {statuses}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, str(statuses)) metadata = {} metadata["ip_address"] = Helpers().extract_local_ip() metadata["uptime"] = Helpers().get_uptime() metadata["git_commit"] = Helpers().get_git_commit_id()
Mqtt.publish_to_topic(client, MQTT_TOPIC_METADATA, str(metadata))
3
2023-12-22 08:06:09+00:00
16k
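
The completion target in the record above is a plain topic-to-handler dispatch inside an MQTT on_message callback. Below is a minimal, self-contained sketch of that dispatch pattern; the topic strings, the FakeMessage stand-in, and the handler bodies are illustrative assumptions rather than code taken from the record.

# Minimal sketch of the topic -> handler dispatch used by an MQTT on_message callback.
# FakeMessage and the topic constants are illustrative stand-ins, not the record's code.
from dataclasses import dataclass

MQTT_TOPIC_CONFIG = "raspirri/config"   # assumed topic names, for illustration only
MQTT_TOPIC_CMD = "raspirri/command"
MQTT_TOPIC_VALVES = "raspirri/valves"


@dataclass
class FakeMessage:
    topic: str
    payload: bytes


def handle_config(client, data):
    print(f"config handler got: {data}")


def handle_command(client, data):
    print(f"command handler got: {data}")


def handle_valves(client, data):
    print(f"valves handler got: {data}")


# Mapping topics to handlers keeps on_message a thin router.
DISPATCH = {
    MQTT_TOPIC_CONFIG: handle_config,
    MQTT_TOPIC_CMD: handle_command,
    MQTT_TOPIC_VALVES: handle_valves,
}


def on_message(client, userdata, msg):
    data = msg.payload.decode("utf-8")
    handler = DISPATCH.get(msg.topic)
    if handler is not None:
        handler(client, data)


if __name__ == "__main__":
    on_message(None, None, FakeMessage(MQTT_TOPIC_CMD, b'{"cmd": 1, "out": 2}'))

The dict-based routing is behaviourally equivalent to the record's if/elif chain; it is shown only to make the mapping from topic to handler explicit.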
shibing624/chatgpt-webui
src/models.py
[ { "identifier": "shared", "path": "src/shared.py", "snippet": "class State:\n def interrupt(self):\n def recover(self):\n def set_api_host(self, api_host: str):\n def reset_api_host(self):\n def reset_all(self):\n def set_api_key_queue(self, api_key_list):\n def switching_api_key(self, func):\n def wrapped(*args, **kwargs):" }, { "identifier": "config", "path": "src/config.py", "snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):" }, { "identifier": "BaseLLMModel", "path": "src/base_model.py", "snippet": "class BaseLLMModel:\n def __init__(\n self,\n model_name,\n system_prompt=INITIAL_SYSTEM_PROMPT,\n temperature=1.0,\n top_p=1.0,\n n_choices=1,\n stop=\"\",\n max_generation_token=None,\n presence_penalty=0,\n frequency_penalty=0,\n logit_bias=None,\n user=\"\",\n single_turn=False,\n ) -> None:\n self.history = []\n self.all_token_counts = []\n self.model_name = model_name\n self.model_type = ModelType.get_type(model_name)\n try:\n self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]\n except KeyError:\n self.token_upper_limit = DEFAULT_TOKEN_LIMIT\n self.interrupted = False\n self.system_prompt = system_prompt\n self.api_key = None\n self.need_api_key = False\n self.history_file_path = get_first_history_name(user)\n self.user_name = user\n self.chatbot = []\n\n self.default_single_turn = single_turn\n self.default_temperature = temperature\n self.default_top_p = top_p\n self.default_n_choices = n_choices\n self.default_stop_sequence = stop\n self.default_max_generation_token = max_generation_token\n self.default_presence_penalty = presence_penalty\n self.default_frequency_penalty = frequency_penalty\n self.default_logit_bias = logit_bias\n self.default_user_identifier = user\n\n self.single_turn = single_turn\n self.temperature = temperature\n self.top_p = top_p\n self.n_choices = n_choices\n self.stop_sequence = stop\n self.max_generation_token = max_generation_token\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.logit_bias = logit_bias\n self.user_identifier = user\n\n self.metadata = {}\n\n def get_answer_stream_iter(self):\n \"\"\"stream predict, need to be implemented\n conversations are stored in self.history, with the most recent question, in OpenAI format\n should return a generator, each time give the next word (str) in the answer\n \"\"\"\n logger.warning(\"stream predict not implemented, using at once predict instead\")\n response, _ = self.get_answer_at_once()\n yield response\n\n def get_answer_at_once(self):\n \"\"\"predict at once, need to be implemented\n conversations are stored in history, with the most recent question, in OpenAI format\n Should return:\n the answer (str)\n total token count (int)\n \"\"\"\n logger.warning(\"at once predict not implemented, using stream predict instead\")\n response_iter = self.get_answer_stream_iter()\n count = 0\n response = ''\n for response in response_iter:\n count += 1\n return response, sum(self.all_token_counts) + count\n\n def billing_info(self):\n \"\"\"get billing infomation, inplement if needed\"\"\"\n return BILLING_NOT_APPLICABLE_MSG\n\n def count_token(self, user_input):\n \"\"\"get token count from input, implement if needed\"\"\"\n return len(user_input)\n\n def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=\"\"):\n def get_return_value():\n return chatbot, status_text\n\n status_text = i18n(\"开始实时传输回答……\")\n if fake_input:\n chatbot.append((fake_input, \"\"))\n else:\n 
chatbot.append((inputs, \"\"))\n\n user_token_count = self.count_token(inputs)\n self.all_token_counts.append(user_token_count)\n logger.debug(f\"输入token计数: {user_token_count}\")\n\n stream_iter = self.get_answer_stream_iter()\n\n if display_append:\n display_append = (\n '\\n\\n<hr class=\"append-display no-in-raw\" />' + display_append\n )\n\n partial_text = \"\"\n token_increment = 1\n for partial_text in stream_iter:\n if type(partial_text) == tuple:\n partial_text, token_increment = partial_text\n chatbot[-1] = (chatbot[-1][0], partial_text + display_append)\n self.all_token_counts[-1] += token_increment\n status_text = self.token_message()\n yield get_return_value()\n if self.interrupted:\n self.recover()\n break\n self.history.append(construct_assistant(partial_text))\n\n def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=\"\"):\n if fake_input:\n chatbot.append((fake_input, \"\"))\n else:\n chatbot.append((inputs, \"\"))\n if fake_input is not None:\n user_token_count = self.count_token(fake_input)\n else:\n user_token_count = self.count_token(inputs)\n self.all_token_counts.append(user_token_count)\n ai_reply, total_token_count = self.get_answer_at_once()\n self.history.append(construct_assistant(ai_reply))\n if fake_input is not None:\n self.history[-2] = construct_user(fake_input)\n chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)\n if fake_input is not None:\n self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))\n else:\n self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)\n status_text = self.token_message()\n return chatbot, status_text\n\n def handle_file_upload(self, files, chatbot, language):\n \"\"\"if the model accepts modal input, implement this function\"\"\"\n status = gr.Markdown.update()\n if files:\n construct_index(self.api_key, files=files)\n status = i18n(\"索引构建完成\")\n return gr.Files.update(), chatbot, status\n\n def prepare_inputs(\n self, real_inputs, use_websearch,\n files, reply_language, chatbot,\n load_from_cache_if_possible=True,\n ):\n display_append = []\n limited_context = False\n if type(real_inputs) == list:\n fake_inputs = real_inputs[0][\"text\"]\n else:\n fake_inputs = real_inputs\n if files:\n from langchain.vectorstores.base import VectorStoreRetriever\n from langchain.retrievers import BM25Retriever, EnsembleRetriever\n limited_context = True\n msg = \"加载索引中……\"\n logger.info(msg)\n index, documents = construct_index(\n self.api_key,\n files=files,\n load_from_cache_if_possible=load_from_cache_if_possible,\n )\n assert index is not None, \"获取索引失败\"\n msg = \"索引获取成功,生成回答中……\"\n logger.info(msg)\n k = 3\n score_threshold = 0.6\n with retrieve_proxy():\n vec_retriever = VectorStoreRetriever(\n vectorstore=index,\n search_type=\"similarity_score_threshold\",\n search_kwargs={\"k\": k, \"score_threshold\": score_threshold}\n )\n bm25_retriever = BM25Retriever.from_documents(documents, preprocess_func=chinese_preprocessing_func)\n bm25_retriever.k = k\n ensemble_retriever = EnsembleRetriever(\n retrievers=[bm25_retriever, vec_retriever],\n weights=[0.5, 0.5],\n )\n try:\n relevant_documents = ensemble_retriever.get_relevant_documents(fake_inputs)\n except:\n return self.prepare_inputs(\n fake_inputs,\n use_websearch,\n files,\n reply_language,\n chatbot,\n load_from_cache_if_possible=False,\n )\n reference_results = [\n [d.page_content.strip(\"�\"), os.path.basename(d.metadata[\"source\"])]\n for d in relevant_documents\n ]\n reference_results = 
add_source_numbers(reference_results)\n display_append = add_details(reference_results)\n display_append = \"\\n\\n\" + \"\".join(display_append)\n if type(real_inputs) == list:\n real_inputs[0][\"text\"] = (\n replace_today(PROMPT_TEMPLATE)\n .replace(\"{query_str}\", fake_inputs)\n .replace(\"{context_str}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n real_inputs = (\n replace_today(PROMPT_TEMPLATE)\n .replace(\"{query_str}\", real_inputs)\n .replace(\"{context_str}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n elif use_websearch:\n from duckduckgo_search import DDGS\n search_results = []\n with DDGS() as ddgs:\n ddgs_gen = ddgs.text(fake_inputs, backend=\"lite\")\n for r in islice(ddgs_gen, 10):\n search_results.append(r)\n reference_results = []\n for idx, result in enumerate(search_results):\n logger.debug(f\"搜索结果{idx + 1}:{result}\")\n domain_name = urllib3.util.parse_url(result[\"href\"]).host\n reference_results.append([result[\"body\"], result[\"href\"]])\n display_append.append(\n # f\"{idx+1}. [{domain_name}]({result['href']})\\n\"\n f\"<a href=\\\"{result['href']}\\\" target=\\\"_blank\\\">{idx + 1}.&nbsp;{result['title']}</a>\"\n )\n reference_results = add_source_numbers(reference_results)\n # display_append = \"<ol>\\n\\n\" + \"\".join(display_append) + \"</ol>\"\n display_append = (\n '<div class = \"source-a\">' + \"\".join(display_append) + \"</div>\"\n )\n if type(real_inputs) == list:\n real_inputs[0][\"text\"] = (\n replace_today(WEBSEARCH_PTOMPT_TEMPLATE)\n .replace(\"{query}\", fake_inputs)\n .replace(\"{web_results}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n real_inputs = (\n replace_today(WEBSEARCH_PTOMPT_TEMPLATE)\n .replace(\"{query}\", fake_inputs)\n .replace(\"{web_results}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n display_append = \"\"\n return limited_context, fake_inputs, display_append, real_inputs, chatbot\n\n def predict(\n self,\n inputs,\n chatbot,\n stream=False,\n use_websearch=False,\n files=None,\n reply_language=\"中文\",\n should_check_token_count=True,\n ): # repetition_penalty, top_k\n\n status_text = \"开始生成回答……\"\n if type(inputs) == list:\n logger.info(\n \"用户\"\n + f\"{self.user_name}\"\n + \"的输入为:\"\n + \"(\"\n + str(len(inputs) - 1)\n + \" images) \"\n + f\"{inputs[0]['text']}\"\n )\n else:\n logger.info(\n \"用户\"\n + f\"{self.user_name}\"\n + \"的输入为:\"\n + f\"{inputs}\"\n )\n if should_check_token_count:\n if type(inputs) == list:\n yield chatbot + [(inputs[0][\"text\"], \"\")], status_text\n else:\n yield chatbot + [(inputs, \"\")], status_text\n if reply_language == \"跟随问题语言(不稳定)\":\n reply_language = \"the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch.\"\n\n limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(\n real_inputs=inputs,\n use_websearch=use_websearch,\n files=files,\n reply_language=reply_language,\n chatbot=chatbot\n )\n yield chatbot + [(fake_inputs, \"\")], status_text\n\n if (\n self.need_api_key and\n self.api_key is None\n and not shared.state.multi_api_key\n ):\n status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG\n logger.info(status_text)\n chatbot.append((inputs, \"\"))\n if len(self.history) == 0:\n self.history.append(construct_user(inputs))\n self.history.append(\"\")\n self.all_token_counts.append(0)\n else:\n self.history[-2] 
= construct_user(inputs)\n yield chatbot + [(inputs, \"\")], status_text\n return\n elif len(inputs.strip()) == 0:\n status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG\n logger.info(status_text)\n yield chatbot + [(inputs, \"\")], status_text\n return\n\n if self.single_turn:\n self.history = []\n self.all_token_counts = []\n if type(inputs) == list:\n self.history.append(inputs)\n else:\n self.history.append(construct_user(inputs))\n\n try:\n if stream:\n logger.debug(\"使用流式传输\")\n iter = self.stream_next_chatbot(\n inputs,\n chatbot,\n fake_input=fake_inputs,\n display_append=display_append,\n )\n for chatbot, status_text in iter:\n yield chatbot, status_text\n else:\n logger.debug(\"不使用流式传输\")\n chatbot, status_text = self.next_chatbot_at_once(\n inputs,\n chatbot,\n fake_input=fake_inputs,\n display_append=display_append,\n )\n yield chatbot, status_text\n except Exception as e:\n traceback.print_exc()\n status_text = STANDARD_ERROR_MSG + str(e)\n yield chatbot, status_text\n\n if len(self.history) > 1 and self.history[-1][\"content\"] != inputs:\n logger.info(\"回答为:\" + f\"{self.history[-1]['content']}\")\n\n if limited_context:\n self.history = []\n self.all_token_counts = []\n\n max_token = self.token_upper_limit - TOKEN_OFFSET\n\n if sum(self.all_token_counts) > max_token and should_check_token_count:\n count = 0\n while (\n sum(self.all_token_counts)\n > self.token_upper_limit * REDUCE_TOKEN_FACTOR\n and sum(self.all_token_counts) > 0\n ):\n count += 1\n del self.all_token_counts[0]\n del self.history[:2]\n logger.info(status_text)\n status_text = f\"为了防止token超限,模型忘记了早期的 {count} 轮对话\"\n yield chatbot, status_text\n\n def retry(\n self,\n chatbot,\n stream=False,\n use_websearch=False,\n files=None,\n reply_language=\"中文\",\n ):\n logger.debug(\"重试中……\")\n if len(self.history) > 1:\n inputs = self.history[-2][\"content\"]\n del self.history[-2:]\n if len(self.all_token_counts) > 0:\n self.all_token_counts.pop()\n elif len(chatbot) > 0:\n inputs = chatbot[-1][0]\n if '<div class=\"user-message\">' in inputs:\n inputs = inputs.split('<div class=\"user-message\">')[1]\n inputs = inputs.split(\"</div>\")[0]\n elif len(self.history) == 1:\n inputs = self.history[-1][\"content\"]\n del self.history[-1]\n else:\n yield chatbot, f\"{STANDARD_ERROR_MSG}上下文是空的\"\n return\n\n iter = self.predict(\n inputs,\n chatbot,\n stream=stream,\n use_websearch=use_websearch,\n files=files,\n reply_language=reply_language,\n )\n for x in iter:\n yield x\n logger.debug(\"重试完毕\")\n\n def interrupt(self):\n self.interrupted = True\n\n def recover(self):\n self.interrupted = False\n\n def set_token_upper_limit(self, new_upper_limit):\n self.token_upper_limit = new_upper_limit\n logger.info(f\"token上限设置为{new_upper_limit}\")\n self.auto_save()\n\n def set_temperature(self, new_temperature):\n self.temperature = new_temperature\n self.auto_save()\n\n def set_top_p(self, new_top_p):\n self.top_p = new_top_p\n self.auto_save()\n\n def set_n_choices(self, new_n_choices):\n self.n_choices = new_n_choices\n self.auto_save()\n\n def set_stop_sequence(self, new_stop_sequence: str):\n new_stop_sequence = new_stop_sequence.split(\",\")\n self.stop_sequence = new_stop_sequence\n self.auto_save()\n\n def set_max_tokens(self, new_max_tokens):\n self.max_generation_token = new_max_tokens\n self.auto_save()\n\n def set_presence_penalty(self, new_presence_penalty):\n self.presence_penalty = new_presence_penalty\n self.auto_save()\n\n def set_frequency_penalty(self, new_frequency_penalty):\n self.frequency_penalty = 
new_frequency_penalty\n self.auto_save()\n\n def set_logit_bias(self, logit_bias):\n self.logit_bias = logit_bias\n self.auto_save()\n\n def encoded_logit_bias(self):\n if self.logit_bias is None:\n return {}\n logit_bias = self.logit_bias.split()\n bias_map = {}\n encoding = tiktoken.get_encoding(\"cl100k_base\")\n for line in logit_bias:\n word, bias_amount = line.split(\":\")\n if word:\n for token in encoding.encode(word):\n bias_map[token] = float(bias_amount)\n return bias_map\n\n def set_user_identifier(self, new_user_identifier):\n self.user_identifier = new_user_identifier\n self.auto_save()\n\n def set_system_prompt(self, new_system_prompt):\n self.system_prompt = new_system_prompt\n self.auto_save()\n\n def set_key(self, new_access_key):\n self.api_key = new_access_key.strip()\n msg = i18n(\"API密钥更改为了\") + hide_middle_chars(self.api_key)\n logger.info(msg)\n return self.api_key, msg\n\n def set_single_turn(self, new_single_turn):\n self.single_turn = new_single_turn\n self.auto_save()\n\n def reset(self, remain_system_prompt=False):\n self.history = []\n self.all_token_counts = []\n self.interrupted = False\n self.history_file_path = new_auto_history_filename(self.user_name)\n history_name = self.history_file_path[:-5]\n choices = [history_name] + get_history_names(self.user_name)\n system_prompt = self.system_prompt if remain_system_prompt else \"\"\n\n self.single_turn = self.default_single_turn\n self.temperature = self.default_temperature\n self.top_p = self.default_top_p\n self.n_choices = self.default_n_choices\n self.stop_sequence = self.default_stop_sequence\n self.max_generation_token = self.default_max_generation_token\n self.presence_penalty = self.default_presence_penalty\n self.frequency_penalty = self.default_frequency_penalty\n self.logit_bias = self.default_logit_bias\n self.user_identifier = self.default_user_identifier\n\n return (\n [],\n self.token_message([0]),\n gr.Radio.update(choices=choices, value=history_name),\n system_prompt,\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n self.stop_sequence,\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n\n def delete_first_conversation(self):\n if self.history:\n del self.history[:2]\n del self.all_token_counts[0]\n return self.token_message()\n\n def delete_last_conversation(self, chatbot):\n if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:\n msg = \"由于包含报错信息,只删除chatbot记录\"\n chatbot = chatbot[:-1]\n return chatbot, self.history\n if len(self.history) > 0:\n self.history = self.history[:-2]\n if len(chatbot) > 0:\n msg = \"删除了一组chatbot对话\"\n chatbot = chatbot[:-1]\n if len(self.all_token_counts) > 0:\n msg = \"删除了一组对话的token计数记录\"\n self.all_token_counts.pop()\n msg = \"删除了一组对话\"\n self.chatbot = chatbot\n self.auto_save(chatbot)\n return chatbot, msg\n\n def token_message(self, token_lst=None):\n if token_lst is None:\n token_lst = self.all_token_counts\n token_sum = 0\n for i in range(len(token_lst)):\n token_sum += sum(token_lst[: i + 1])\n return (\n i18n(\"Token 计数: \")\n + f\"{sum(token_lst)}\"\n + i18n(\",本次对话累计消耗了 \")\n + f\"{token_sum} tokens\"\n )\n\n def rename_chat_history(self, filename, chatbot):\n if filename == \"\":\n return gr.update()\n if not filename.endswith(\".json\"):\n filename += \".json\"\n self.delete_chat_history(self.history_file_path)\n # 命名重复检测\n repeat_file_index = 2\n full_path = os.path.join(HISTORY_DIR, self.user_name, filename)\n while 
os.path.exists(full_path):\n full_path = os.path.join(\n HISTORY_DIR, self.user_name, f\"{repeat_file_index}_{filename}\"\n )\n repeat_file_index += 1\n filename = os.path.basename(full_path)\n\n self.history_file_path = filename\n save_file(filename, self, chatbot)\n return init_history_list(self.user_name)\n\n def auto_name_chat_history(\n self, name_chat_method, user_question, chatbot, single_turn_checkbox\n ):\n if len(self.history) == 2 and not single_turn_checkbox:\n user_question = self.history[0][\"content\"]\n if type(user_question) == list:\n user_question = user_question[0][\"text\"]\n filename = replace_special_symbols(user_question)[:16] + \".json\"\n return self.rename_chat_history(filename, chatbot)\n else:\n return gr.update()\n\n def auto_save(self, chatbot=None):\n if chatbot is None:\n chatbot = self.chatbot\n save_file(self.history_file_path, self, chatbot)\n\n def export_markdown(self, filename, chatbot):\n if filename == \"\":\n return\n if not filename.endswith(\".md\"):\n filename += \".md\"\n save_file(filename, self, chatbot)\n\n def load_chat_history(self, new_history_file_path=None):\n logger.debug(f\"{self.user_name} 加载对话历史中……\")\n if new_history_file_path is not None:\n if type(new_history_file_path) != str:\n # copy file from new_history_file_path.name to os.path.join(HISTORY_DIR, self.user_name)\n new_history_file_path = new_history_file_path.name\n shutil.copyfile(\n new_history_file_path,\n os.path.join(\n HISTORY_DIR,\n self.user_name,\n os.path.basename(new_history_file_path),\n ),\n )\n self.history_file_path = os.path.basename(new_history_file_path)\n else:\n self.history_file_path = new_history_file_path\n try:\n if self.history_file_path == os.path.basename(self.history_file_path):\n history_file_path = os.path.join(\n HISTORY_DIR, self.user_name, self.history_file_path\n )\n else:\n history_file_path = self.history_file_path\n if not self.history_file_path.endswith(\".json\"):\n history_file_path += \".json\"\n saved_json = {}\n if os.path.exists(history_file_path):\n with open(history_file_path, \"r\", encoding=\"utf-8\") as f:\n saved_json = json.load(f)\n try:\n if type(saved_json[\"history\"][0]) == str:\n logger.info(\"历史记录格式为旧版,正在转换……\")\n new_history = []\n for index, item in enumerate(saved_json[\"history\"]):\n if index % 2 == 0:\n new_history.append(construct_user(item))\n else:\n new_history.append(construct_assistant(item))\n saved_json[\"history\"] = new_history\n logger.info(new_history)\n except:\n pass\n if len(saved_json[\"chatbot\"]) < len(saved_json[\"history\"]) // 2:\n logger.info(\"Trimming corrupted history...\")\n saved_json[\"history\"] = saved_json[\"history\"][-len(saved_json[\"chatbot\"]):]\n logger.info(f\"Trimmed history: {saved_json['history']}\")\n logger.debug(f\"{self.user_name} 加载对话历史完毕\")\n self.history = saved_json[\"history\"]\n self.single_turn = saved_json.get(\"single_turn\", self.single_turn)\n self.temperature = saved_json.get(\"temperature\", self.temperature)\n self.top_p = saved_json.get(\"top_p\", self.top_p)\n self.n_choices = saved_json.get(\"n_choices\", self.n_choices)\n self.stop_sequence = list(saved_json.get(\"stop_sequence\", self.stop_sequence))\n self.token_upper_limit = saved_json.get(\n \"token_upper_limit\", self.token_upper_limit\n )\n self.max_generation_token = saved_json.get(\n \"max_generation_token\", self.max_generation_token\n )\n self.presence_penalty = saved_json.get(\n \"presence_penalty\", self.presence_penalty\n )\n self.frequency_penalty = saved_json.get(\n 
\"frequency_penalty\", self.frequency_penalty\n )\n self.logit_bias = saved_json.get(\"logit_bias\", self.logit_bias)\n self.user_identifier = saved_json.get(\"user_identifier\", self.user_name)\n self.metadata = saved_json.get(\"metadata\", self.metadata)\n self.chatbot = saved_json[\"chatbot\"]\n return (\n os.path.basename(self.history_file_path)[:-5],\n saved_json[\"system\"],\n saved_json[\"chatbot\"],\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n \",\".join(self.stop_sequence),\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n except:\n # 没有对话历史或者对话历史解析失败\n logger.info(f\"没有找到对话历史记录 {self.history_file_path}\")\n self.reset()\n return (\n os.path.basename(self.history_file_path),\n \"\",\n [],\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n \",\".join(self.stop_sequence),\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n\n def delete_chat_history(self, filename):\n if filename == \"CANCELED\":\n return gr.update(), gr.update(), gr.update()\n if filename == \"\":\n return i18n(\"你没有选择任何对话历史\"), gr.update(), gr.update()\n if not filename.endswith(\".json\"):\n filename += \".json\"\n if filename == os.path.basename(filename):\n history_file_path = os.path.join(HISTORY_DIR, self.user_name, filename)\n else:\n history_file_path = filename\n md_history_file_path = history_file_path[:-5] + \".md\"\n try:\n os.remove(history_file_path)\n os.remove(md_history_file_path)\n return i18n(\"删除对话历史成功\"), get_history_list(self.user_name), []\n except:\n logger.info(f\"删除对话历史失败 {history_file_path}\")\n return (\n i18n(\"对话历史\") + filename + i18n(\"已经被删除啦\"),\n get_history_list(self.user_name),\n [],\n )\n\n def auto_load(self):\n filepath = get_history_filepath(self.user_name)\n if not filepath:\n self.history_file_path = new_auto_history_filename(self.user_name)\n else:\n self.history_file_path = filepath\n return self.load_chat_history()\n\n def like(self):\n \"\"\"like the last response, implement if needed\"\"\"\n return gr.update()\n\n def dislike(self):\n \"\"\"dislike the last response, implement if needed\"\"\"\n return gr.update()\n\n def deinitialize(self):\n \"\"\"deinitialize the model, implement if needed\"\"\"\n pass" }, { "identifier": "ModelType", "path": "src/base_model.py", "snippet": "class ModelType(Enum):\n Unknown = -1\n OpenAI = 0\n ChatGLM = 1\n OpenAIInstruct = 2\n OpenAIVision = 3\n Claude = 4\n Qwen = 5\n LLaMA = 6\n\n @classmethod\n def get_type(cls, model_name: str):\n model_name_lower = model_name.lower()\n if \"gpt\" in model_name_lower:\n if \"instruct\" in model_name_lower:\n model_type = ModelType.OpenAIInstruct\n elif \"vision\" in model_name_lower:\n model_type = ModelType.OpenAIVision\n else:\n model_type = ModelType.OpenAI\n elif \"chatglm\" in model_name_lower:\n model_type = ModelType.ChatGLM\n elif \"llama\" in model_name_lower or \"alpaca\" in model_name_lower or \"yi\" in model_name_lower:\n model_type = ModelType.LLaMA\n else:\n model_type = ModelType.Unknown\n return model_type" }, { "identifier": "ChatGLMClient", "path": "src/chatglm.py", "snippet": "class ChatGLMClient(BaseLLMModel):\n def __init__(self, model_name, user_name=\"\"):\n super().__init__(model_name=model_name, user=user_name)\n import torch\n from transformers import AutoModel, AutoTokenizer\n global CHATGLM_TOKENIZER, 
CHATGLM_MODEL\n self.deinitialize()\n if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None:\n system_name = platform.system()\n logger.info(f\"Loading model from {model_name}\")\n if model_name in LOCAL_MODELS:\n model_path = LOCAL_MODELS[model_name]\n else:\n model_path = model_name\n CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n quantified = False\n if \"int4\" in model_name:\n quantified = True\n model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map='auto', torch_dtype='auto')\n if torch.cuda.is_available():\n logger.info(\"CUDA is available, using CUDA\")\n model = model.half().cuda()\n # mps加速还存在一些问题,暂时不使用\n elif system_name == \"Darwin\" and model_path is not None and not quantified:\n logger.info(\"Running on macOS, using MPS\")\n # running on macOS and model already downloaded\n model = model.half().to(\"mps\")\n else:\n logger.info(\"GPU is not available, using CPU\")\n model = model.float()\n model = model.eval()\n logger.info(f\"Model loaded from {model_path}\")\n CHATGLM_MODEL = model\n\n def _get_glm3_style_input(self):\n history = self.history\n query = history.pop()[\"content\"]\n return history, query\n\n def _get_glm2_style_input(self):\n history = [x[\"content\"] for x in self.history]\n query = history.pop()\n logger.debug(f\"{history}\")\n assert len(history) % 2 == 0, f\"History should be even length. current history is: {history}\"\n history = [[history[i], history[i + 1]]\n for i in range(0, len(history), 2)]\n return history, query\n\n def _get_glm_style_input(self):\n if \"glm2\" in self.model_name:\n return self._get_glm2_style_input()\n else:\n return self._get_glm3_style_input()\n\n def get_answer_at_once(self):\n history, query = self._get_glm_style_input()\n response, _ = CHATGLM_MODEL.chat(\n CHATGLM_TOKENIZER, query, history=history)\n return response, len(response)\n\n def get_answer_stream_iter(self):\n history, query = self._get_glm_style_input()\n for response, history in CHATGLM_MODEL.stream_chat(\n CHATGLM_TOKENIZER,\n query,\n history,\n max_length=self.token_upper_limit,\n top_p=self.top_p,\n temperature=self.temperature,\n ):\n yield response\n\n def deinitialize(self):\n import gc\n import torch\n # 释放显存\n global CHATGLM_MODEL, CHATGLM_TOKENIZER\n CHATGLM_MODEL = None\n CHATGLM_TOKENIZER = None\n gc.collect()\n torch.cuda.empty_cache()\n logger.info(\"ChatGLM model deinitialized\")" }, { "identifier": "LLaMAClient", "path": "src/llama.py", "snippet": "class LLaMAClient(BaseLLMModel):\n def __init__(self, model_name, user_name=\"\"):\n super().__init__(model_name=model_name, user=user_name)\n from transformers import AutoModelForCausalLM, AutoTokenizer\n self.max_generation_token = 1000\n logger.info(f\"Loading model from {model_name}\")\n if model_name in LOCAL_MODELS:\n model_path = LOCAL_MODELS[model_name]\n else:\n model_path = model_name\n self.tokenizer = AutoTokenizer.from_pretrained(model_path, legacy=True, use_fast=False)\n self.model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype='auto').eval()\n logger.info(f\"Model loaded from {model_path}\")\n self.stop_str = self.tokenizer.eos_token or \"</s>\"\n\n def _get_chat_input(self):\n messages = []\n for conv in self.history:\n if conv[\"role\"] == \"system\":\n messages.append({'role': 'system', 'content': conv[\"content\"]})\n elif conv[\"role\"] == \"user\":\n messages.append({'role': 'user', 'content': conv[\"content\"]})\n else:\n messages.append({'role': 'assistant', 'content': 
conv[\"content\"]})\n input_ids = self.tokenizer.apply_chat_template(\n conversation=messages,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors='pt'\n )\n\n return input_ids.to(self.model.device)\n\n def get_answer_at_once(self):\n input_ids = self._get_chat_input()\n output_ids = self.model.generate(\n input_ids,\n max_new_tokens=self.max_generation_token,\n top_p=self.top_p,\n temperature=self.temperature,\n )\n response = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)\n\n return response, len(response)\n\n def get_answer_stream_iter(self):\n from transformers import TextIteratorStreamer\n from threading import Thread\n input_ids = self._get_chat_input()\n streamer = TextIteratorStreamer(\n self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True\n )\n thread = Thread(\n target=self.model.generate,\n kwargs={\"input_ids\": input_ids,\n \"max_new_tokens\": self.max_generation_token,\n \"top_p\": self.top_p,\n \"temperature\": self.temperature,\n \"streamer\": streamer}\n )\n thread.start()\n generated_text = \"\"\n for new_text in streamer:\n stop = False\n pos = new_text.find(self.stop_str)\n if pos != -1:\n new_text = new_text[:pos]\n stop = True\n generated_text += new_text\n yield generated_text\n if stop:\n break" }, { "identifier": "INITIAL_SYSTEM_PROMPT", "path": "src/presets.py", "snippet": "INITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"" }, { "identifier": "TIMEOUT_ALL", "path": "src/presets.py", "snippet": "TIMEOUT_ALL = 200 # 非流式对话时的超时时间" }, { "identifier": "TIMEOUT_STREAMING", "path": "src/presets.py", "snippet": "TIMEOUT_STREAMING = 60 # 流式对话时的超时时间" }, { "identifier": "STANDARD_ERROR_MSG", "path": "src/presets.py", "snippet": "STANDARD_ERROR_MSG = i18n(\"☹️发生了错误:\") # 错误信息的标准前缀" }, { "identifier": "CONNECTION_TIMEOUT_MSG", "path": "src/presets.py", "snippet": "CONNECTION_TIMEOUT_MSG = i18n(\"连接超时,无法获取对话。\") # 连接超时" }, { "identifier": "READ_TIMEOUT_MSG", "path": "src/presets.py", "snippet": "READ_TIMEOUT_MSG = i18n(\"读取超时,无法获取对话。\") # 读取超时" }, { "identifier": "ERROR_RETRIEVE_MSG", "path": "src/presets.py", "snippet": "ERROR_RETRIEVE_MSG = i18n(\"请检查网络连接,或者API-Key是否有效。\")" }, { "identifier": "GENERAL_ERROR_MSG", "path": "src/presets.py", "snippet": "GENERAL_ERROR_MSG = i18n(\"获取对话时发生错误,请查看后台日志\")" }, { "identifier": "CHAT_COMPLETION_URL", "path": "src/presets.py", "snippet": "CHAT_COMPLETION_URL = \"https://api.openai.com/v1/chat/completions\"" }, { "identifier": "SUMMARY_CHAT_SYSTEM_PROMPT", "path": "src/presets.py", "snippet": "SUMMARY_CHAT_SYSTEM_PROMPT = \"\"\"\\\nPlease summarize the following conversation for a chat topic.\nNo more than 16 characters.\nNo special characters.\nPunctuation mark is banned.\nNot including '.' ':' '?' '!' 
'“' '*' '<' '>'.\nReply in user's language.\n\"\"\"" }, { "identifier": "hide_middle_chars", "path": "src/utils.py", "snippet": " class DataframeData(TypedDict):\nclass ConfigType(Enum):\nclass ConfigItem:\nclass SetupWizard:\ndef predict(current_model, *args):\ndef billing_info(current_model):\ndef set_key(current_model, *args):\ndef load_chat_history(current_model, *args):\ndef delete_chat_history(current_model, *args):\ndef interrupt(current_model, *args):\ndef reset(current_model, *args):\ndef retry(current_model, *args):\ndef delete_first_conversation(current_model, *args):\ndef delete_last_conversation(current_model, *args):\ndef set_system_prompt(current_model, *args):\ndef rename_chat_history(current_model, *args):\ndef auto_name_chat_history(current_model, *args):\ndef export_markdown(current_model, *args):\ndef upload_chat_history(current_model, *args):\ndef set_token_upper_limit(current_model, *args):\ndef set_temperature(current_model, *args):\ndef set_top_p(current_model, *args):\ndef set_n_choices(current_model, *args):\ndef set_stop_sequence(current_model, *args):\ndef set_max_tokens(current_model, *args):\ndef set_presence_penalty(current_model, *args):\ndef set_frequency_penalty(current_model, *args):\ndef set_logit_bias(current_model, *args):\ndef set_user_identifier(current_model, *args):\ndef set_single_turn(current_model, *args):\ndef handle_file_upload(current_model, *args):\ndef handle_summarize_index(current_model, *args):\ndef like(current_model, *args):\ndef dislike(current_model, *args):\ndef count_token(input_str):\ndef markdown_to_html_with_syntax_highlight(md_str): # deprecated\n def replacer(match):\ndef normalize_markdown(md_text: str) -> str: # deprecated\ndef convert_mdtext(md_text): # deprecated\ndef clip_rawtext(chat_message, need_escape=True):\ndef convert_bot_before_marked(chat_message):\ndef convert_user_before_marked(chat_message):\ndef escape_markdown(text):\ndef convert_asis(userinput): # deprecated\ndef detect_converted_mark(userinput): # deprecated\ndef detect_language(code): # deprecated\ndef construct_text(role, text):\ndef construct_user(text):\ndef construct_system(text):\ndef construct_assistant(text):\ndef save_file(filename, model, chatbot):\ndef sorted_by_pinyin(list):\ndef sorted_by_last_modified_time(list, dir):\ndef get_file_names_by_type(dir, filetypes=[\".json\"]):\ndef get_file_names_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_dropdown_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_by_last_modified_time(dir, filetypes=[\".json\"]):\ndef get_history_names(user_name=\"\"):\ndef get_first_history_name(user_name=\"\"):\ndef get_history_list(user_name=\"\"):\ndef init_history_list(user_name=\"\"):\ndef filter_history(user_name, keyword):\ndef load_template(filename, mode=0):\ndef get_template_names():\ndef get_template_dropdown():\ndef get_template_content(templates, selection, original_system_prompt):\ndef reset_textbox():\ndef reset_default():\ndef change_api_host(host):\ndef change_proxy(proxy):\ndef hide_middle_chars(s):\ndef submit_key(key):\ndef replace_today(prompt):\ndef get_geoip():\n def fetch_ip():\ndef find_n(lst, max_num):\ndef start_outputing():\ndef end_outputing():\ndef cancel_outputing():\ndef transfer_input(inputs):\ndef update_chuanhu():\ndef add_source_numbers(lst, source_name=\"Source\", use_source=True):\ndef add_details(lst):\ndef sheet_to_string(sheet, sheet_name=None):\ndef excel_to_string(file_path):\ndef get_last_day_of_month(any_day):\ndef get_model_source(model_name, 
alternative_source):\ndef refresh_ui_elements_on_load(current_model, selected_model_name, user_name):\ndef toggle_like_btn_visibility(selected_model_name):\ndef get_corresponding_file_type_by_model_name(selected_model_name):\ndef new_auto_history_filename(username):\ndef get_history_filepath(username):\ndef beautify_err_msg(err_msg):\ndef auth_from_conf(username, password):\ndef get_files_hash(file_src=None, file_paths=None):\ndef myprint(**args):\ndef replace_special_symbols(string, replace_string=\" \"):\n def __init__(self, key, name, default=None, type=ConfigType.String) -> None:\ndef generate_prompt_string(config_item):\ndef generate_result_string(config_item, config_value):\n def __init__(self, file_path=config_file) -> None:\n def set(self, config_items: List[ConfigItem], prompt: str):\n def set_users(self):\n def __setitem__(self, setting_key: str, value):\n def __getitem__(self, setting_key: str):\n def save(self):\ndef setup_wizard():\ndef save_pkl(data, file_path):\ndef load_pkl(file_path):\ndef chinese_preprocessing_func(text: str) -> List[str]:\nSERVER_GEO_IP_MSG = None\nFETCHING_IP = False\n SERVER_GEO_IP_MSG = i18n(\"你可以使用聊天功能。\")\n SERVER_GEO_IP_MSG = \"**您的IP区域:中国。**\"\n SERVER_GEO_IP_MSG = i18n(\"您的IP区域:\") + f\"{country}。\"\n FETCHING_IP = False\n FETCHING_IP = True" } ]
import base64
import datetime
import json
import os
import colorama
import gradio as gr
import requests
import traceback
import traceback
from io import BytesIO
from PIL import Image
from loguru import logger

from src import shared, config
from src.base_model import BaseLLMModel, ModelType
from src.chatglm import ChatGLMClient
from src.llama import LLaMAClient
from src.presets import (
    INITIAL_SYSTEM_PROMPT,
    TIMEOUT_ALL,
    TIMEOUT_STREAMING,
    STANDARD_ERROR_MSG,
    CONNECTION_TIMEOUT_MSG,
    READ_TIMEOUT_MSG,
    ERROR_RETRIEVE_MSG,
    GENERAL_ERROR_MSG,
    CHAT_COMPLETION_URL,
    SUMMARY_CHAT_SYSTEM_PROMPT
)
from src.utils import (
    hide_middle_chars,
    count_token,
    construct_system,
    construct_user,
    get_last_day_of_month,
    i18n,
    replace_special_symbols,
)
11,056
# -*- coding: utf-8 -*-
"""
Get model client from model name
"""


class OpenAIClient(BaseLLMModel):
    def __init__(
            self,
            model_name,
            api_key,
            system_prompt=INITIAL_SYSTEM_PROMPT,
            temperature=1.0,
            top_p=1.0,
            user_name="",
    ) -> None:
        super().__init__(
            model_name=model_name,
            temperature=temperature,
            top_p=top_p,
            system_prompt=system_prompt,
            user=user_name,
        )
        self.api_key = api_key
        self.need_api_key = True
        self._refresh_header()

    def get_answer_stream_iter(self):
        if not self.api_key:
            raise ValueError("API key is not set")
        response = self._get_response(stream=True)
        if response is not None:
            iter = self._decode_chat_response(response)
            partial_text = ""
            for i in iter:
                partial_text += i
                yield partial_text
        else:
            yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG

    def get_answer_at_once(self):
        if not self.api_key:
            raise ValueError("API key is not set")
        response = self._get_response()
        response = json.loads(response.text)
        content = response["choices"][0]["message"]["content"]
        total_token_count = response["usage"]["total_tokens"]
        return content, total_token_count

    def count_token(self, user_input):
        input_token_count = count_token(construct_user(user_input))
        if self.system_prompt is not None and len(self.all_token_counts) == 0:
            system_prompt_token_count = count_token(
                construct_system(self.system_prompt)
            )
            return input_token_count + system_prompt_token_count
        return input_token_count

    def billing_info(self):
        try:
            curr_time = datetime.datetime.now()
            last_day_of_month = get_last_day_of_month(
                curr_time).strftime("%Y-%m-%d")
            first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d")
            usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}"
            try:
                usage_data = self._get_billing_data(usage_url)
            except Exception as e:
                logger.warning(f"获取API使用情况失败:" + str(e))
# -*- coding: utf-8 -*-
"""
Get model client from model name
"""


class OpenAIClient(BaseLLMModel):
    def __init__(
            self,
            model_name,
            api_key,
            system_prompt=INITIAL_SYSTEM_PROMPT,
            temperature=1.0,
            top_p=1.0,
            user_name="",
    ) -> None:
        super().__init__(
            model_name=model_name,
            temperature=temperature,
            top_p=top_p,
            system_prompt=system_prompt,
            user=user_name,
        )
        self.api_key = api_key
        self.need_api_key = True
        self._refresh_header()

    def get_answer_stream_iter(self):
        if not self.api_key:
            raise ValueError("API key is not set")
        response = self._get_response(stream=True)
        if response is not None:
            iter = self._decode_chat_response(response)
            partial_text = ""
            for i in iter:
                partial_text += i
                yield partial_text
        else:
            yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG

    def get_answer_at_once(self):
        if not self.api_key:
            raise ValueError("API key is not set")
        response = self._get_response()
        response = json.loads(response.text)
        content = response["choices"][0]["message"]["content"]
        total_token_count = response["usage"]["total_tokens"]
        return content, total_token_count

    def count_token(self, user_input):
        input_token_count = count_token(construct_user(user_input))
        if self.system_prompt is not None and len(self.all_token_counts) == 0:
            system_prompt_token_count = count_token(
                construct_system(self.system_prompt)
            )
            return input_token_count + system_prompt_token_count
        return input_token_count

    def billing_info(self):
        try:
            curr_time = datetime.datetime.now()
            last_day_of_month = get_last_day_of_month(
                curr_time).strftime("%Y-%m-%d")
            first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d")
            usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}"
            try:
                usage_data = self._get_billing_data(usage_url)
            except Exception as e:
                logger.warning(f"获取API使用情况失败:" + str(e))
return i18n("**获取API使用情况失败**")
16
2023-12-27 12:14:26+00:00
16k
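
The billing_info method in the record above builds a usage query from the first and last day of the current month. The sketch below re-derives that date window with the standard library; get_last_day_of_month here is an assumed calendar-based equivalent of the repo's helper, and the printed URL base is a placeholder for shared.state.usage_api_url.

# Sketch of the billing-window computation used by billing_info in the record above:
# the month's first and last day bound the start_date/end_date query parameters.
import calendar
import datetime


def get_last_day_of_month(any_day):
    # monthrange returns (weekday_of_first_day, number_of_days_in_month)
    days_in_month = calendar.monthrange(any_day.year, any_day.month)[1]
    return any_day.replace(day=days_in_month)


def month_usage_window(now):
    first_day = now.replace(day=1).strftime("%Y-%m-%d")
    last_day = get_last_day_of_month(now).strftime("%Y-%m-%d")
    return first_day, last_day


if __name__ == "__main__":
    start, end = month_usage_window(datetime.datetime(2023, 12, 27))
    # "<usage_api_url>" is a placeholder; the real base URL comes from shared.state in the repo.
    print(f"<usage_api_url>?start_date={start}&end_date={end}")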
camenduru/AnyDoor-online-hf
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
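For reference, the context snippets above (normal_kl and DiagonalGaussianDistribution.kl) implement the closed-form KL divergence between diagonal Gaussians. A minimal standalone sketch, with illustrative tensor names that are not part of the dataset, checks that formula against torch.distributions:

# Sketch: verify the diagonal-Gaussian KL used by normal_kl / DiagonalGaussianDistribution.kl()
# against PyTorch's reference implementation. All names here are illustrative.
import torch
from torch.distributions import Normal, kl_divergence

mean1, logvar1 = torch.randn(4), torch.randn(4)
mean2, logvar2 = torch.randn(4), torch.randn(4)

kl_closed_form = 0.5 * (
    -1.0
    + logvar2
    - logvar1
    + torch.exp(logvar1 - logvar2)
    + (mean1 - mean2) ** 2 * torch.exp(-logvar2)
)
kl_reference = kl_divergence(
    Normal(mean1, torch.exp(0.5 * logvar1)),
    Normal(mean2, torch.exp(0.5 * logvar2)),
)
assert torch.allclose(kl_closed_form, kl_reference, atol=1e-5)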
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
12325
if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
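The cropped_code above repeatedly calls q_sample and extract_into_tensor to re-noise a clean sample at timestep t. A self-contained sketch of that forward step, x_t = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps, built on the same "linear" beta schedule as make_beta_schedule; all variable names and shapes here are illustrative, not taken from the repository:

# Sketch: the forward-diffusion step that q_sample implements.
import torch

T = 1000
betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, T, dtype=torch.float64) ** 2  # "linear" schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(2, 3, 8, 8, dtype=torch.float64)   # stand-in "image" batch
t = torch.tensor([10, 900])                          # an early and a late timestep
eps = torch.randn_like(x0)

abar = alphas_cumprod[t].view(-1, 1, 1, 1)           # what extract_into_tensor does for 4D inputs
x_t = abar.sqrt() * x0 + (1.0 - abar).sqrt() * eps

# At small t, x_t stays close to x0; at large t, it is almost pure noise.
print((x_t - x0).flatten(1).norm(dim=1))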
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): #c 1,3,224,224 if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): #1,1,1024 c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def 
get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not 
self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): #t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() t = self.time_steps.reshape( (x.shape[0],) ).to(self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False) #boundary = self.boundary.to(loss_simple.device) #boundary = F.interpolate(boundary, size = (64,64)) * 5 + 1.0 #16,1,64,64 #print(loss_simple.shape) #16,4,64,64 loss_simple = loss_simple.mean([1, 2, 3]) #.mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) #print(self.parameterization, self.learn_logvar, self.original_elbo_weight, self.lvlb_weights[t]) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
ddim_sampler = DDIMSampler(self)
16
2023-12-25 04:48:34+00:00
16k
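The cropped code in the row above is a latent-diffusion training loss: p_losses picks its regression target from the parameterization flag ("x0", "eps", or "v"), and q_sample draws x_t from the closed-form forward process. The sketch below restates just that target construction in standalone PyTorch, assuming the usual DDPM schedule buffers (sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod); the helper names are illustrative and not taken from the repository itself.

import torch

def extract(a, t, x_shape):
    # Gather one scalar per batch element from a 1-D schedule tensor and
    # reshape it for broadcasting against an image-shaped tensor.
    return a.gather(-1, t).reshape(-1, *((1,) * (len(x_shape) - 1)))

def q_sample(x_start, t, noise, sqrt_ac, sqrt_one_minus_ac):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    return (extract(sqrt_ac, t, x_start.shape) * x_start
            + extract(sqrt_one_minus_ac, t, x_start.shape) * noise)

def regression_target(parameterization, x_start, noise, t, sqrt_ac, sqrt_one_minus_ac):
    if parameterization == "x0":
        return x_start
    if parameterization == "eps":
        return noise
    if parameterization == "v":
        # v-prediction: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
        return (extract(sqrt_ac, t, x_start.shape) * noise
                - extract(sqrt_one_minus_ac, t, x_start.shape) * x_start)
    raise NotImplementedError(parameterization)

That target feeds get_loss in the snippet, whose per-sample mean over channel and spatial dimensions is logged as loss_simple before the logvar reweighting.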
smonsays/modular-hyperteacher
metax/data/imitation.py
[ { "identifier": "Environment", "path": "metax/data/envs/base.py", "snippet": "class Environment(abc.ABC):\n @abc.abstractproperty\n def num_actions(self) -> int:\n \"\"\" Number of possible actions.\"\"\"\n\n @abc.abstractproperty\n def observation_shape(self):\n \"\"\"The shape of the observation array\"\"\"\n\n @abc.abstractmethod\n def observe(self, env_state: EnvironmentState):\n \"\"\"Returns the observation from the environment state.\"\"\"\n\n @abc.abstractmethod\n def reset(self, rng: PRNGKey, goal: Array = None) -> Tuple[Any, EnvironmentInteraction]:\n \"\"\"Resets the environment to an initial state.\"\"\"\n\n @abc.abstractmethod\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n \"\"\"Resets the environment goal.\"\"\"\n\n def step(\n self, rng: PRNGKey, env_state: EnvironmentState, action: Array\n ) -> Tuple[EnvironmentState, EnvironmentInteraction]:\n \"\"\"Run one timestep of the environment's dynamics. Returns the Transition and the Environment state.\"\"\"\n\n # return self._step(rng, env_state, action)\n def empty_step(rng, state, action):\n \"\"\"\n Only update time and give no reward.\n \"\"\"\n new_timestep = state.timestep + 1\n new_state = state.replace(timestep=new_timestep)\n new_emission = EnvironmentInteraction(\n observation=self.observe(state),\n reward=0.0,\n done=state.done,\n timestep=new_timestep,\n )\n return new_state, new_emission\n\n # Only run env step if not already done\n return jax.lax.cond(\n env_state.done,\n empty_step,\n self._step,\n rng,\n env_state,\n action,\n )\n\n @abc.abstractmethod\n def _step(\n self, rng: PRNGKey, env_state: EnvironmentState, action: Array\n ) -> Tuple[EnvironmentState, EnvironmentInteraction]:\n \"\"\"Run one timestep of the environment's dynamics. Returns the Transition and the Environment state.\"\"\"" }, { "identifier": "CompositionalGrid", "path": "metax/data/envs/grid.py", "snippet": "class CompositionalGrid(Environment):\n def __init__(\n self,\n grid_size: int,\n num_interactions: int,\n num_mazes: int,\n num_objects: int,\n num_distractors: int,\n frac_ood: float,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n assert grid_size > 5, \"grid_size must be greater than 5\"\n\n self.grid_size = grid_size\n self.num_interactions = num_interactions\n self.num_directions = 4 # split grid into 4 quadrants for the goal position\n self.num_objects = num_objects\n self.num_mazes = num_mazes\n self.num_distractors = num_distractors\n self.frac_ood = frac_ood\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n self.num_factors = 4 # direction, interaction, maze, object\n\n # Static matrices\n self._delta_position = jnp.concatenate((\n jnp.array([[-1, 0], [0, 1], [1, 0], [0, -1]]), # up, right, down, left\n jnp.zeros((self.num_interactions, 2), dtype=jnp.int32), # no movement for interaction\n ))\n size_low, size_high = grid_size // 2, (grid_size // 2) + grid_size % 2\n self._quadrants = jnp.stack((\n np.block([\n [np.ones((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.ones((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.ones((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n 
[np.zeros((size_low, size_high)), np.ones((size_low, size_low))]\n ]),\n ))\n\n # Pregenerate possible goals and randomly split into in/out of distribution\n self.tasks_all = np.array(list(itertools.product(\n range(self.num_directions),\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n if self.task_support == \"non_compositional\":\n # in/out split with non-compositional support\n self.tasks_in_dist = np.array(list(itertools.product(\n range(self.num_directions - 1), # hold out one goal quadrant from in_dist\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n @partial(np.vectorize, signature=\"(k),(n,k)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n self.tasks_out_dist = self.tasks_all[~elem_in_array(self.tasks_all, self.tasks_in_dist)]\n\n elif \"_hot\" in self.task_support:\n num_hot = int(self.task_support.split(\"_\")[0])\n mask = jnp.sum(self.tasks_all > 0, axis=1) <= num_hot\n self.tasks_in_dist = jnp.array(self.tasks_all[mask])\n self.tasks_out_dist = jnp.array(self.tasks_all[~mask])\n\n elif self.task_support == \"random\":\n self.tasks_all = jax.random.permutation(self.rng, self.tasks_all)\n self.num_ood = int(len(self.tasks_all) * self.frac_ood)\n self.tasks_in_dist = jnp.array(self.tasks_all[: -self.num_ood])\n self.tasks_out_dist = jnp.array(self.tasks_all[-self.num_ood:])\n\n # Make sure all features for every factor are present in the in-distribution tasks\n assert len(jnp.unique(self.tasks_in_dist[:, 0])) == self.num_directions\n assert len(jnp.unique(self.tasks_in_dist[:, 1])) == self.num_interactions\n assert len(jnp.unique(self.tasks_in_dist[:, 2])) == self.num_mazes\n assert len(jnp.unique(self.tasks_in_dist[:, 3])) == self.num_objects\n else:\n raise ValueError(f\"Invalid task support: {self.task_support}\")\n\n assert len(self.tasks_in_dist) > 0\n assert len(self.tasks_out_dist) > 0\n\n # Create random mazes\n if self.num_mazes > 0:\n self.mazes = jnp.stack([\n self.generate_random_maze(self.grid_size, seed=self.seed + i)\n for i in range(self.num_mazes)\n ])\n else:\n self.mazes = jnp.zeros((1, self.grid_size, self.grid_size))\n\n # Precompute optimal paths, this is potentially expensive for large grid sizes\n optimal_paths, shortest_paths = list(\n zip(*[self._precompute_optimal_paths(m) for m in self.mazes])\n )\n self.optimal_paths, shortest_paths = jnp.stack(optimal_paths), jnp.stack(shortest_paths)\n self.valid_goal_dist = shortest_paths >= self.grid_size\n\n @property\n def num_actions(self) -> int:\n return 4 + self.num_interactions\n\n @property\n def observation_shape(self) -> Tuple[int]:\n # encodes positions of agent, objects and walls\n return (self.grid_size, self.grid_size, self.num_objects + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n assert mode in [\"ood\", \"test\", \"train\"]\n if mode == \"ood\":\n task_code = jax.random.choice(rng, self.tasks_out_dist)\n else:\n task_code = jax.random.choice(rng, self.tasks_in_dist)\n\n task_id = jnp.ravel_multi_index(\n task_code,\n dims=(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects),\n mode=\"wrap\",\n )\n emb_dim = max(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects)\n embedding = jax.nn.one_hot(task_code, emb_dim)\n\n return CompositionalGridGoal(*task_code), {\"task_id\": task_id, \"embedding\": embedding}\n\n def reset(\n self, rng: PRNGKey, goal: Optional[CompositionalGridGoal] = None\n ) -> 
Tuple[CompositionalGridState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_distractor, rng_pos1, rng_pos2, rng_pos3, rng_goal = jax.random.split(rng, 5)\n\n if goal is None:\n # Sample a goal from train distribution if None specified\n goal, _ = self.reset_goal(rng_goal, mode=\"train\")\n\n # Sample distractor objects distinct from goal object\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.num_objects,\n shape=(self.num_distractors,),\n replace=True,\n p=1.0 - (jnp.arange(self.num_objects) == goal.object)\n )\n\n # Sample distinct, random positions for agent, distractors and the goal respecting direction\n position_goal = jax.random.choice(\n key=rng_pos2,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]) * self._quadrants[goal.direction]).reshape(-1),\n )\n goal_coord = self._coord_to_idx(position_goal[0][0], position_goal[0][1])\n position_agent = jax.random.choice(\n key=rng_pos1,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]).reshape(-1) * self.valid_goal_dist[goal.maze][goal_coord]),\n )\n positions_distractors = jax.random.choice(\n key=rng_pos3,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(self.num_distractors, ),\n replace=False,\n p=1.0 - self.mazes[goal.maze].reshape(-1),\n )\n\n positions = jnp.concatenate([position_goal, positions_distractors, position_agent])\n\n env_state = CompositionalGridState(\n done=False, timestep=0, distractors=distractors, positions=positions, goal=goal\n )\n emission = EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n\n return env_state, emission\n\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> Tuple[CompositionalGridState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1, :]\n\n # Check if agent reached goal (positive reward)\n goal_reached = jnp.logical_and(\n action == (len(MOVES) + env_state.goal.interaction),\n jnp.all(pos_agent == env_state.positions[0, :]),\n )\n reward = 1.0 * goal_reached\n\n # Move the agent to new position and check if valid\n pos_new = self._delta_position[action] + pos_agent\n pos_invalid = jnp.logical_or(\n jnp.logical_or(jnp.any(pos_new < 0), jnp.any(pos_new >= self.grid_size)), # in grid?\n self.mazes[env_state.goal.maze][pos_new[0], pos_new[1]], # in wall?\n )\n pos_new = jnp.where(pos_invalid, pos_agent, pos_new)\n\n # Update state\n positions = env_state.positions.at[-1].set(pos_new)\n env_state = CompositionalGridState(\n done=goal_reached,\n timestep=env_state.timestep + 1,\n distractors=env_state.distractors,\n positions=positions,\n goal=env_state.goal,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: CompositionalGridState) -> Array:\n \"\"\"\n Encode the environment state as an asrray of shape (grid_size, grid_size, num_factors * num_objects + 1).\n For each position in the grid, the code word has the following structure:\n [factor_0_feature_0, ..., factor_0_feature_n, ..., factor_n_feature_0, ..., factor_n_feature_n, wall?, agent?]\n \"\"\"\n objects = jnp.concatenate([jnp.array([env_state.goal.object]), env_state.distractors])\n objects_hot = jax.nn.one_hot(objects, num_classes=self.num_objects)\n 
pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros(self.observation_shape)\n grid = grid.at[\n jnp.expand_dims(pos_objects[:, 0], axis=1),\n jnp.expand_dims(pos_objects[:, 1], axis=1),\n :-2,\n ].set(jnp.expand_dims(objects_hot, axis=1))\n grid = grid.at[:, :, -2].set(self.mazes[env_state.goal.maze]) # walls encoded in penultimate channel\n grid = grid.at[pos_agent[0], pos_agent[1], -1].set(1.0) # agent encoded in last channel\n\n return grid\n\n def _features_to_idx(self, features: Array) -> Array:\n \"\"\"Converts features to a unique feature index\"\"\"\n idx = [factor * self.num_objects + feature for factor, feature in enumerate(features)]\n return jnp.array(idx)\n\n def _coord_to_idx(self, x, y):\n \"\"\"Converts coordinates to a unique grid index\"\"\"\n return x * self.grid_size + y\n\n def _idx_to_coord(self, idx):\n \"\"\"Converts a grid index to grid coordinates\"\"\"\n return idx // self.grid_size, idx % self.grid_size\n\n def demonstrate(\n self, rng: PRNGKey, env_state: CompositionalGridState\n ) -> EnvironmentInteraction:\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n pos_agent, pos_goal = env_state.positions[-1, :], env_state.positions[0, :]\n idx_agent, idx_goal = self._coord_to_idx(*pos_agent), self._coord_to_idx(*pos_goal)\n optimal_actions = self.optimal_paths[env_state.goal.maze][idx_agent, idx_goal]\n\n # Fill placeholder actions with correct interaction\n mask_pad = (optimal_actions == -1)\n optimal_actions *= ~mask_pad\n optimal_actions += (len(MOVES) + env_state.goal.interaction) * mask_pad\n\n def env_step(carry, action):\n rng, env_state = carry\n rng, rng_step = jax.random.split(rng)\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), emission\n\n _, trajectory = jax.lax.scan(env_step, (rng, env_state), optimal_actions)\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission, trajectory\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, optimal_actions\n\n def _precompute_optimal_paths(self, maze: Array):\n \"\"\"Precompute the optimal trajectories for all possible states.\"\"\"\n # Create an array that encodes the graph structure of the grid to compute all shortest paths\n coordinates, no_walls_coords = [], np.argwhere(maze == 0)\n for x, y in no_walls_coords:\n edges = []\n if x > 0 and not maze[x - 1, y]:\n edges.append([x - 1, y])\n if x < self.grid_size - 1 and not maze[x + 1, y]:\n edges.append([x + 1, y])\n if y > 0 and not maze[x, y - 1]:\n edges.append([x, y - 1])\n if y < self.grid_size - 1 and not maze[x, y + 1]:\n edges.append([x, y + 1])\n\n idx_curr = self._coord_to_idx(x, y)\n coordinates += [(idx_curr, self._coord_to_idx(i, k)) for (i, k) in edges]\n\n coordinates = np.array(coordinates)\n connectivity = np.zeros((self.grid_size**2, self.grid_size**2))\n connectivity[coordinates[:, 0], coordinates[:, 1]] = 1.0\n shortest_paths, predecessors = shortest_path(connectivity, return_predecessors=True)\n max_num_actions = (self.grid_size**2) - 1\n\n def get_path(predecessors, start, end):\n \"\"\"Get the full path from the predecessor matrix.\"\"\"\n path = [end]\n while path[-1] != start:\n path.append(predecessors[start, 
path[-1]])\n return path[::-1]\n\n def path_to_actions(path):\n \"\"\"Convert path to actions.\"\"\"\n # Pad with placeholder actions, need to be overwritten with correct interaction in self.demonstrate()\n actions = np.full((max_num_actions), -1)\n for i in range(len(path) - 1):\n x1, y1 = self._idx_to_coord(path[i])\n x2, y2 = self._idx_to_coord(path[i + 1])\n action = np.array([x2 - x1, y2 - y1])\n action = np.where(np.all(self._delta_position == action, axis=1))[0][0]\n actions[i] = action\n return np.array(actions)\n\n # Precompute optimal paths for all possible positions\n optimal_paths = -1 * np.ones(\n (self.grid_size**2, self.grid_size**2, max_num_actions), dtype=int\n )\n for start in no_walls_coords:\n for goal in no_walls_coords:\n start_idx, goal_idx = self._coord_to_idx(*start), self._coord_to_idx(*goal)\n path = get_path(predecessors, start_idx, goal_idx)\n actions = path_to_actions(path)\n optimal_paths[start_idx, goal_idx, :] = actions\n\n return jnp.array(optimal_paths), jnp.array(shortest_paths)\n\n @staticmethod\n def generate_random_maze(\n grid_size: int, complexity: float = 0.75, density: float = 0.75, seed: int = 0\n ):\n \"\"\"\n Generate a random maze array.\n Walls are encoded as 1 and free space as 0.\n\n Adapted from https://github.com/zuoxingdong/mazelab/blob/master/mazelab/generators/random_maze.py\n which is based on https://en.wikipedia.org/wiki/Maze_generation_algorithm\n \"\"\"\n assert grid_size % 2 == 1, \"Maze size must be odd\"\n grid_size_pad = grid_size + 2\n np_rng = np.random.default_rng(seed)\n\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (grid_size_pad + grid_size_pad)))\n density = int(density * ((grid_size_pad // 2) * (grid_size_pad // 2)))\n\n # Fill borders\n grid = np.zeros((grid_size_pad, grid_size_pad), dtype=bool)\n grid[0, :] = grid[-1, :] = 1\n grid[:, 0] = grid[:, -1] = 1\n\n # Make aisles\n for _ in range(density):\n x, y = (\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n )\n grid[y, x] = 1\n for j in range(complexity):\n neighbours = []\n if x > 1:\n neighbours.append((y, x - 2))\n if x < grid_size_pad - 2:\n neighbours.append((y, x + 2))\n if y > 1:\n neighbours.append((y - 2, x))\n if y < grid_size_pad - 2:\n neighbours.append((y + 2, x))\n if len(neighbours):\n y_, x_ = neighbours[np_rng.integers(0, len(neighbours))]\n if grid[y_, x_] == 0:\n grid[y_, x_] = 1\n grid[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n x, y = x_, y_\n\n return grid.astype(int)[1:-1, 1:-1]" }, { "identifier": "CompositionalPreference", "path": "metax/data/envs/preference.py", "snippet": "class CompositionalPreference(Environment):\n # _layout = \"\"\"\\\n # wwwwwwwwwwwww\n # w w w\n # w w w\n # w w\n # w w w\n # w w w\n # ww wwww w\n # w www www\n # w w w\n # w w w\n # w w\n # w w w\n # wwwwwwwwwwwww\n # \"\"\"\n _layout = \"\"\"\\\nwwwwwww\nw w w\nw w w\nww ww\nw w w\nw w w\nwwwwwww\n\"\"\"\n _delta_position = jnp.array(\n [\n [0, 0], # NOTHING\n [-1, 0], # UP\n [0, 1], # RIGHT\n [1, 0], # DOWN\n [0, -1], # LEFT\n ]\n )\n\n def __init__(\n self,\n num_preferences: int, # ~=num_experts\n num_features: int, # ~=dim layer weight\n num_objects: int,\n num_hot: int, # ~= num_hot\n continuous_combinations: bool,\n discount: float,\n frac_ood: float,\n timelimit: int,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n self.num_preferences = num_preferences\n self.num_features = num_features\n self.num_objects = num_objects\n self.num_hot = 
num_hot\n self.continuous_combinations = continuous_combinations\n self.discount = discount\n self.frac_ood = frac_ood\n self.timelimit = timelimit\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n\n # We assume a fixed grid.\n self.grid = jnp.array(\n [list(map(lambda c: 0 if c == \" \" else 1, line)) for line in self._layout.splitlines()]\n )\n self.free_coord = jnp.array([(x, y) for (x, y) in zip(*np.where(self.grid == 0))])\n grid_idx_to_coord_matrix = jax.nn.one_hot(\n self.free_coord[:, 0] * self.grid.shape[1] + self.free_coord[:, 1],\n self.grid.shape[0] * self.grid.shape[1],\n )\n self.coord_matrix_to_grid_idx = jnp.argmax(grid_idx_to_coord_matrix.T, axis=-1)\n self.grid_idx_to_coord_matrix = jnp.argmax(grid_idx_to_coord_matrix, axis=-1)\n self.num_free_coord = self.free_coord.shape[0]\n self.num_available_distractors_config = 2**self.num_objects\n self.num_states = self.num_free_coord * self.num_available_distractors_config\n\n self.preference_basis = jax.random.normal(\n self.rng, (self.num_preferences, self.num_features)\n )\n\n # Generate all possible combinations of 1:num_hot experts (num_experts choose num_hot)\n preference_combin_all = []\n for h in range(1, self.num_hot + 1):\n perms = itertools.combinations(range(self.num_preferences), h)\n preference_idx = np.array(list(perms)).reshape(-1, h)\n preference_combin_all_k_hot = self.k_hot(preference_idx)\n preference_combin_all.append(preference_combin_all_k_hot)\n\n preference_combin_all = jnp.concatenate(preference_combin_all)\n\n if self.task_support == \"connected\" or self.task_support == \"disconnected\":\n assert self.num_hot == 2\n assert self.num_preferences > 4 and self.num_preferences % 2 == 0\n # connected: 0 1 2 3 4 5 6 7 01 12 23 34 45 56 67 70 02 13 24 35 46 57 60 71\n preference_combin = [self.k_hot(np.arange(self.num_preferences)[:, None])] # one-hots\n preference_combin.append(self.k_hot(np.stack(( # two-hots 01 12 23 34 45 56 67 70\n np.arange(self.num_preferences),\n (np.arange(self.num_preferences) + 1) % self.num_preferences)).T\n ))\n preference_combin.append(self.k_hot(np.stack(( # two-hots 02 13 24 35 46 57 60 71\n np.arange(self.num_preferences),\n (np.arange(self.num_preferences) + 2) % self.num_preferences)).T\n ))\n preference_combin_connected = np.concatenate(preference_combin)\n\n @partial(np.vectorize, signature=\"(n),(m,n)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n mask_connected = elem_in_array(preference_combin_all, preference_combin_connected)\n\n # disconnected: 1 and 2 hots out of (0,1,2,3) U 1 and 2 hots out of (4,5,6,7)\n mask_1_hot = jnp.sum(preference_combin_all, axis=-1) == 1\n mask_2_hot = jnp.sum(preference_combin_all, axis=-1) == 2\n mask_preference_combin_1 = jnp.all(preference_combin_all[:, :self.num_preferences // 2] == 0, axis=1)\n mask_preference_combin_2 = jnp.all(preference_combin_all[:, self.num_preferences // 2:] == 0, axis=1)\n\n mask_disconnected = (\n (mask_1_hot & mask_preference_combin_1) | (mask_1_hot & mask_preference_combin_2) | (\n mask_2_hot & mask_preference_combin_1) | (mask_2_hot & mask_preference_combin_2)\n )\n\n if self.task_support == \"connected\":\n mask_in_dist = mask_connected\n elif self.task_support == \"disconnected\":\n mask_in_dist = mask_disconnected\n\n mask_out_dist = ~(mask_connected | mask_disconnected)\n\n self.preference_in_dist = jnp.array(preference_combin_all[mask_in_dist])\n self.preference_out_dist = 
jnp.array(preference_combin_all[mask_out_dist])\n\n elif self.task_support == \"non_compositional\":\n # Non-compositional task support holds-out the last expert in the last layer\n mask_last_expert = preference_combin_all[:, -1] == 1\n self.preference_in_dist = jnp.array(preference_combin_all[~mask_last_expert])\n self.preference_out_dist = jnp.array(preference_combin_all[mask_last_expert])\n\n elif self.task_support == \"random\":\n # Randomly split task experts into in and out distribution tasks\n preference_combin_all = jax.random.permutation(self.rng, preference_combin_all)\n self.num_ood = int(len(preference_combin_all) * self.frac_ood)\n self.preference_in_dist = jnp.array(preference_combin_all[: -self.num_ood])\n self.preference_out_dist = jnp.array(preference_combin_all[-self.num_ood:])\n\n assert len(self.preference_in_dist) > 0\n assert len(self.preference_out_dist) > 0\n\n self.objects_all = jax.random.permutation(self.rng, np.arange(self.num_features))\n\n @partial(jnp.vectorize, excluded=(0,), signature=\"(n)->(m)\")\n def k_hot(self, ind):\n \"\"\"\n Convert a vector of indeces to a k-hot vector.\n Repeating an index does not change the result.\n \"\"\"\n return (jnp.sum(jax.nn.one_hot(ind, self.num_preferences), axis=0) > 0) * 1.0\n\n @property\n def num_actions(self) -> int:\n return len(ACTIONS)\n\n @property\n def observation_shape(self) -> Tuple[int]:\n return (*self.grid.shape, self.num_features + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n # Copied from hyperteacher\n rng_tasks, rng_weights = jax.random.split(rng)\n if mode in [\"test\", \"train\", \"ood\"]:\n task_experts = self.preference_out_dist if mode == \"ood\" else self.preference_in_dist\n task_ids = jax.random.choice(rng_tasks, len(task_experts), shape=())\n embeddings = task_experts[task_ids]\n\n if mode == \"ood\":\n task_ids += len(self.preference_in_dist)\n elif \"ood_\" in mode:\n hotness = int(mode.split(\"_\")[1])\n if hotness <= self.num_hot:\n # Filter the existing task_experts_out_dist for the given hotness\n task_ids = jax.random.choice(\n key=rng_tasks,\n a=len(self.preference_out_dist),\n p=1.0 * jnp.all(\n jnp.sum(self.preference_out_dist, axis=-1) == hotness, axis=-1\n ),\n shape=(),\n )\n embeddings = self.preference_out_dist[task_ids]\n elif hotness <= self.num_preferences:\n # Randomly sample task_experts - everything is ood here\n expert_indeces = jax.random.choice(rng_tasks, self.num_preferences, replace=False, shape=(hotness, ))\n embeddings = self.k_hot(expert_indeces)\n task_ids = -1 * jnp.ones(()) # No unique task IDs available here\n else:\n raise ValueError(f\"Invalid hotness {hotness}\")\n\n if self.continuous_combinations:\n # Sample weights uniformly from simplex (see Willms, 2021)\n weights = jax.random.exponential(rng_weights, shape=embeddings.shape)\n weights = weights * embeddings\n weights = weights / (jnp.sum(weights, axis=-1, keepdims=True) + 1)\n\n # Shift nonzero embeddings to the range [0.5, 1.0] to prevent adding further sparsity\n embeddings = (0.5 * weights + 0.5) * embeddings\n\n return embeddings, {\"task_id\": task_ids, \"embedding\": embeddings[None, :]}\n\n @partial(jax.jit, static_argnums=(0))\n def reset(\n self, rng: PRNGKey, goal: Array = None\n ) -> Tuple[PreferenceState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_preference, rng_distractor, rng_pos = jax.random.split(rng, 3)\n\n if goal is None:\n # Sample a preference from train distribution if None specified\n goal, _ = 
self.reset_goal(rng_preference, mode=\"train\")\n\n preference = goal\n\n # Sample distractors\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.objects_all,\n shape=(self.num_objects,),\n replace=True,\n )\n\n positions = jax.random.choice(\n rng_pos, self.free_coord, shape=(self.num_objects + 1,), replace=False\n )\n\n env_state = PreferenceState(\n done=False,\n timestep=0,\n positions=positions,\n features=distractors,\n available_distractors=jnp.ones((self.num_objects,)),\n preference=preference,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n return env_state, emission\n\n @partial(jax.jit, static_argnums=(0))\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> Tuple[PreferenceState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1][0], env_state.positions[-1][1]\n distractors_pos = env_state.positions[:-1]\n features = env_state.features\n available_distractors = env_state.available_distractors\n\n preference = env_state.preference\n\n next_pos_agent, next_available_distractors, reward = self._move(\n pos_agent, features, available_distractors, distractors_pos, preference, action\n )\n next_timestep = env_state.timestep + 1\n # Update state\n env_state = PreferenceState(\n # If NOTHING is performed, the environment immediately terminates.\n done=jnp.logical_or(next_timestep > self.timelimit, action == ACTIONS.NOTHING.value),\n timestep=next_timestep,\n positions=env_state.positions.at[-1].set(next_pos_agent),\n features=env_state.features,\n available_distractors=next_available_distractors,\n preference=env_state.preference,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: PreferenceState) -> Array:\n distractor_idx = env_state.features\n pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros((*self.grid.shape, self.num_features + 2))\n\n grid = grid.at[\n (pos_objects[:, 0]),\n (pos_objects[:, 1]),\n distractor_idx,\n ].set(env_state.available_distractors)\n grid = grid.at[pos_agent[0], pos_agent[1], -2].set(\n 1.0\n ) # agent encoded in penultimate channel\n grid = grid.at[:, :, -1].set(self.grid) # walls encoded in last channel\n\n return grid\n\n def _idx_to_state(self, idx):\n grid_idx = idx // self.num_available_distractors_config\n distractor_config_idx = idx % self.num_available_distractors_config\n coord_packed = self.grid_idx_to_coord_matrix[grid_idx]\n coord = coord_packed // self.grid.shape[1], coord_packed % self.grid.shape[1]\n return coord, (((distractor_config_idx & (1 << np.arange(self.num_objects)))) > 0).astype(\n int\n )\n\n def _state_to_idx(self, coord, available_distractors):\n coord_packed = coord[0] * self.grid.shape[1] + coord[1]\n grid_idx = self.coord_matrix_to_grid_idx[coord_packed]\n distractor_config_idx = available_distractors @ (2 ** jnp.arange(self.num_objects))\n return (grid_idx * self.num_available_distractors_config + distractor_config_idx).astype(\n int\n )\n\n def _move(\n self, pos_agent, features, available_distractors, distractors_pos, preference, action\n ):\n delta_position = self._delta_position[action]\n next_position = pos_agent[0] + delta_position[0], pos_agent[1] + delta_position[1]\n # TODO(@simon): Remove boundary walls to save some input dim and check if within grid size bounds 
instead\n next_pos_grid = (\n jax.nn.one_hot(next_position[0], self.grid.shape[0])[..., None]\n * jax.nn.one_hot(next_position[1], self.grid.shape[1])[..., None].T\n )\n hit_wall = (self.grid * next_pos_grid).sum()\n next_position = jax.lax.cond(hit_wall, lambda _: pos_agent, lambda _: next_position, None)\n picked_distractor = (next_position[0] == distractors_pos[:, 0]) * (\n next_position[1] == distractors_pos[:, 1]\n )\n\n return (\n next_position,\n available_distractors * (1 - picked_distractor),\n (\n (picked_distractor * available_distractors)\n @ jax.nn.one_hot(features, self.num_features)\n @ self.preference_basis.T\n @ preference\n ),\n )\n\n @partial(jax.jit, static_argnums=(0))\n def demonstrate(self, rng, env_state):\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n action_value_init = jnp.zeros((self.num_states, self.num_actions))\n\n def next_idx_and_reward(idx, action):\n coord, available_distractors = self._idx_to_state(idx)\n next_coord, next_available_feature, reward = self._move(\n coord,\n env_state.features,\n available_distractors,\n env_state.positions[:-1],\n env_state.preference,\n action,\n )\n next_idx = self._state_to_idx(next_coord, next_available_feature)\n # Return the maximum value\n return next_idx, reward\n\n transition_map, reward_map = jax.vmap(\n jax.vmap(next_idx_and_reward, in_axes=(None, 0)), in_axes=(0, None)\n )(jnp.arange(self.num_states), jnp.arange(self.num_actions))\n\n def bellman_backup(action_value, t):\n def next_value(idx, action):\n next_idx = transition_map[idx, action]\n reward = reward_map[idx, action]\n # Return the maximum value\n return self.discount * action_value[next_idx].max() + reward\n\n next_action_value = jax.vmap(\n jax.vmap(next_value, in_axes=(None, 0)), in_axes=(0, None)\n )(jnp.arange(self.num_states), jnp.arange(self.num_actions))\n return next_action_value, None\n\n action_value, _ = jax.lax.scan(\n bellman_backup, action_value_init, jnp.arange(self.timelimit)\n )\n\n def env_step(carry, t):\n rng, env_state = carry\n rng, rng_step = jax.random.split(rng)\n pos_agent = env_state.positions[-1]\n idx = self._state_to_idx(pos_agent, env_state.available_distractors)\n action = jnp.argmax(action_value[idx])\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), (emission, action_value[idx])\n\n (_, _), (trajectory, action_values) = jax.lax.scan(\n env_step, (rng, env_state), jnp.arange(self.timelimit)\n )\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission,\n trajectory,\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, action_values" }, { "identifier": "Dataloader", "path": "metax/data/base.py", "snippet": "class Dataloader(abc.ABC):\n def __init__(self, input_shape: Tuple[int], output_dim: int):\n self.input_shape = input_shape\n self.output_dim = output_dim\n\n @abc.abstractproperty\n def __len__(self):\n pass\n\n @abc.abstractproperty\n def sample_input(self):\n # Sample input should include batch dimension\n pass\n\n @abc.abstractmethod\n def __iter__(self):\n pass" }, { "identifier": "MetaDataset", "path": "metax/data/base.py", "snippet": "class MetaDataset(NamedTuple):\n train: Union[Dataset, MultitaskDataset]\n test: Union[Dataset, MultitaskDataset]" }, { 
"identifier": "MultitaskDataset", "path": "metax/data/base.py", "snippet": "class MultitaskDataset(NamedTuple):\n x: Array\n y: Array\n task_id: Array\n info: Dict = dict()" } ]
from functools import partial
from typing import Optional
from chex import PRNGKey
from metax.data.envs.base import Environment
from metax.data.envs.grid import CompositionalGrid
from metax.data.envs.preference import CompositionalPreference
from .base import Dataloader, MetaDataset, MultitaskDataset
import jax
import jax.numpy as jnp
import jax.tree_util as jtu
11,188
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class ImitationMetaDataloader(Dataloader): def __init__( self, env: Environment, num_tasks: int, shots_train: int, shots_test: int, meta_batch_size: int, mode: str, train_test_split: bool, rng: PRNGKey, ): super().__init__(input_shape=env.observation_shape, output_dim=env.num_actions) self.env = env self.num_tasks = num_tasks self.shots_train = shots_train self.shots_test = shots_test self.meta_batch_size = meta_batch_size self.mode = mode self.train_test_split = train_test_split self.fixed_rng = rng assert num_tasks % meta_batch_size == 0, "num_tasks must be divisible by meta_batch_size" self.num_steps = num_tasks // meta_batch_size @property def sample_input(self): return jnp.zeros((1,) + self.env.observation_shape) def __len__(self): return self.num_steps def __iter__(self): for rng in jax.random.split(self.fixed_rng, self.num_steps): # Sample batch and wrap as MetaDataset rngs_batch = jax.random.split(rng, self.meta_batch_size) yield self.sample_metatask(rngs_batch) @partial(jax.jit, static_argnames="self") @partial(jax.vmap, in_axes=(None, 0)) def sample_metatask(self, rng: PRNGKey) -> MetaDataset: rng_goal, rng_task = jax.random.split(rng, 2) goal, info = self.env.reset_goal(rng_goal, mode=self.mode) @jax.vmap def sample_task(rng): rng_reset, rng_demo = jax.random.split(rng, 2) env_state, _ = self.env.reset(rng_reset, goal=goal) trajectory, actions = self.env.demonstrate(rng_demo, env_state) return MultitaskDataset( x=trajectory.observation, y=actions, task_id=jnp.full(actions.shape[:1], info["task_id"]), info={ "mask": ~trajectory.done, "embeddings": jnp.repeat(info["embedding"][None, :], actions.shape[0], axis=0), }, ) rngs_task = jax.random.split(rng_task, self.shots_train + self.shots_test) train_and_test_task = sample_task(rngs_task) if self.train_test_split: # Split into train and test set return MetaDataset( train=jtu.tree_map( lambda x: x[:self.shots_train].reshape(-1, *x.shape[2:]), train_and_test_task ), test=jtu.tree_map( lambda x: x[self.shots_train:].reshape(-1, *x.shape[2:]), train_and_test_task ), ) else: # No train_test split means, meta.train == meta.test set return MetaDataset( train=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), test=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), ) def create_imitation_metaloader( name, meta_batch_size, shots_train, shots_test, train_test_split, num_tasks_train, num_tasks_test, num_tasks_valid, num_tasks_ood: Optional[int] = None, seed=None, **kwargs, ): ood_sets_hot = None 
if name == "compositional_grid": env = CompositionalGrid( grid_size=kwargs["grid_size"], num_interactions=kwargs["num_interactions"], num_mazes=kwargs["num_mazes"], num_objects=kwargs["num_objects"], num_distractors=kwargs["num_distractors"], frac_ood=kwargs["frac_ood"], task_support=kwargs["task_support"], seed=seed, ) elif name == "compositional_preference": # Return the various OOD tasks for the compositional preference env. ood_sets_hot = jnp.arange(kwargs["num_hot"] + 1, kwargs["num_preferences"] + 1)
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class ImitationMetaDataloader(Dataloader): def __init__( self, env: Environment, num_tasks: int, shots_train: int, shots_test: int, meta_batch_size: int, mode: str, train_test_split: bool, rng: PRNGKey, ): super().__init__(input_shape=env.observation_shape, output_dim=env.num_actions) self.env = env self.num_tasks = num_tasks self.shots_train = shots_train self.shots_test = shots_test self.meta_batch_size = meta_batch_size self.mode = mode self.train_test_split = train_test_split self.fixed_rng = rng assert num_tasks % meta_batch_size == 0, "num_tasks must be divisible by meta_batch_size" self.num_steps = num_tasks // meta_batch_size @property def sample_input(self): return jnp.zeros((1,) + self.env.observation_shape) def __len__(self): return self.num_steps def __iter__(self): for rng in jax.random.split(self.fixed_rng, self.num_steps): # Sample batch and wrap as MetaDataset rngs_batch = jax.random.split(rng, self.meta_batch_size) yield self.sample_metatask(rngs_batch) @partial(jax.jit, static_argnames="self") @partial(jax.vmap, in_axes=(None, 0)) def sample_metatask(self, rng: PRNGKey) -> MetaDataset: rng_goal, rng_task = jax.random.split(rng, 2) goal, info = self.env.reset_goal(rng_goal, mode=self.mode) @jax.vmap def sample_task(rng): rng_reset, rng_demo = jax.random.split(rng, 2) env_state, _ = self.env.reset(rng_reset, goal=goal) trajectory, actions = self.env.demonstrate(rng_demo, env_state) return MultitaskDataset( x=trajectory.observation, y=actions, task_id=jnp.full(actions.shape[:1], info["task_id"]), info={ "mask": ~trajectory.done, "embeddings": jnp.repeat(info["embedding"][None, :], actions.shape[0], axis=0), }, ) rngs_task = jax.random.split(rng_task, self.shots_train + self.shots_test) train_and_test_task = sample_task(rngs_task) if self.train_test_split: # Split into train and test set return MetaDataset( train=jtu.tree_map( lambda x: x[:self.shots_train].reshape(-1, *x.shape[2:]), train_and_test_task ), test=jtu.tree_map( lambda x: x[self.shots_train:].reshape(-1, *x.shape[2:]), train_and_test_task ), ) else: # No train_test split means, meta.train == meta.test set return MetaDataset( train=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), test=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), ) def create_imitation_metaloader( name, meta_batch_size, shots_train, shots_test, train_test_split, num_tasks_train, num_tasks_test, num_tasks_valid, num_tasks_ood: 
Optional[int] = None, seed=None, **kwargs, ): ood_sets_hot = None if name == "compositional_grid": env = CompositionalGrid( grid_size=kwargs["grid_size"], num_interactions=kwargs["num_interactions"], num_mazes=kwargs["num_mazes"], num_objects=kwargs["num_objects"], num_distractors=kwargs["num_distractors"], frac_ood=kwargs["frac_ood"], task_support=kwargs["task_support"], seed=seed, ) elif name == "compositional_preference": # Return the various OOD tasks for the compositional preference env. ood_sets_hot = jnp.arange(kwargs["num_hot"] + 1, kwargs["num_preferences"] + 1)
env = CompositionalPreference(
2
2023-12-22 16:35:49+00:00
16k
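The imitation.py row above defines ImitationMetaDataloader, which samples a goal per meta-task, rolls out expert demonstrations via env.demonstrate, and packs them into MetaDataset(train, test) batches. A hedged usage sketch follows; the constructor values are illustrative assumptions, not the repository's experiment configuration.

import jax
from metax.data.envs.grid import CompositionalGrid
from metax.data.imitation import ImitationMetaDataloader

env = CompositionalGrid(
    grid_size=7, num_interactions=2, num_mazes=1, num_objects=4,
    num_distractors=2, frac_ood=0.25, task_support="random", seed=0,
)
loader = ImitationMetaDataloader(
    env=env,
    num_tasks=8,               # must be divisible by meta_batch_size
    shots_train=1,
    shots_test=1,
    meta_batch_size=4,
    mode="train",
    train_test_split=True,
    rng=jax.random.PRNGKey(0),
)
for metabatch in loader:
    # Leading axis is meta_batch_size; train/test are MultitaskDataset tuples.
    print(metabatch.train.x.shape, metabatch.train.y.shape)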
AContesini/Convert_PDF_to_DOCX_or_vice-versa
venv/Lib/site-packages/tqdm/cli.py
[ { "identifier": "TqdmKeyError", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class TqdmKeyError(KeyError):\n pass" }, { "identifier": "TqdmTypeError", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class TqdmTypeError(TypeError):\n pass" }, { "identifier": "tqdm", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class tqdm(Comparable):\n \"\"\"\n Decorate an iterable object, returning an iterator which acts exactly\n like the original iterable, but prints a dynamically updating\n progressbar every time a value is requested.\n\n Parameters\n ----------\n iterable : iterable, optional\n Iterable to decorate with a progressbar.\n Leave blank to manually manage the updates.\n desc : str, optional\n Prefix for the progressbar.\n total : int or float, optional\n The number of expected iterations. If unspecified,\n len(iterable) is used if possible. If float(\"inf\") or as a last\n resort, only basic progress statistics are displayed\n (no ETA, no progressbar).\n If `gui` is True and this parameter needs subsequent updating,\n specify an initial arbitrary large positive number,\n e.g. 9e9.\n leave : bool, optional\n If [default: True], keeps all traces of the progressbar\n upon termination of iteration.\n If `None`, will leave only if `position` is `0`.\n file : `io.TextIOWrapper` or `io.StringIO`, optional\n Specifies where to output the progress messages\n (default: sys.stderr). Uses `file.write(str)` and `file.flush()`\n methods. For encoding, see `write_bytes`.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes the progressbar to stay within this bound.\n If unspecified, attempts to use environment width. The\n fallback is a meter width of 10 and no limit for the counter and\n statistics. If 0, will not print any meter (only stats).\n mininterval : float, optional\n Minimum progress display update interval [default: 0.1] seconds.\n maxinterval : float, optional\n Maximum progress display update interval [default: 10] seconds.\n Automatically adjusts `miniters` to correspond to `mininterval`\n after long display update lag. Only works if `dynamic_miniters`\n or monitor thread is enabled.\n miniters : int or float, optional\n Minimum progress display update interval, in iterations.\n If 0 and `dynamic_miniters`, will automatically adjust to equal\n `mininterval` (more CPU efficient, good for tight loops).\n If > 0, will skip display of specified number of iterations.\n Tweak this and `mininterval` to get very efficient loops.\n If your progress is erratic with both fast and slow iterations\n (network, skipping items, etc) you should set miniters=1.\n ascii : bool or str, optional\n If unspecified or False, use unicode (smooth blocks) to fill\n the meter. The fallback is to use ASCII characters \" 123456789#\".\n disable : bool, optional\n Whether to disable the entire progressbar wrapper\n [default: False]. If set to None, disable on non-TTY.\n unit : str, optional\n String that will be used to define the unit of each iteration\n [default: it].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be reduced/scaled\n automatically and a metric prefix following the\n International System of Units standard will be added\n (kilo, mega, etc.) [default: False]. 
If any other non-zero\n number, will scale `total` and `n`.\n dynamic_ncols : bool, optional\n If set, constantly alters `ncols` and `nrows` to the\n environment (allowing for window resizes) [default: False].\n smoothing : float, optional\n Exponential moving average smoothing factor for speed estimates\n (ignored in GUI mode). Ranges from 0 (average speed) to 1\n (current/instantaneous speed) [default: 0.3].\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n initial : int or float, optional\n The initial counter value. Useful when restarting a progress\n bar [default: 0]. If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n position : int, optional\n Specify the line offset to print this bar (starting from 0)\n Automatic if unspecified.\n Useful to manage multiple bars at once (eg, from threads).\n postfix : dict or *, optional\n Specify additional stats to display at the end of the bar.\n Calls `set_postfix(**postfix)` if possible (dict).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n write_bytes : bool, optional\n Whether to write bytes. If (default: False) will write unicode.\n lock_args : tuple, optional\n Passed to `refresh` for intermediate output\n (initialisation, iterating, and updating).\n nrows : int, optional\n The screen height. If specified, hides nested bars outside this\n bound. If unspecified, attempts to use environment height.\n The fallback is 20.\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n delay : float, optional\n Don't display until [default: 0] seconds have elapsed.\n gui : bool, optional\n WARNING: internal parameter - do not use.\n Use tqdm.gui.tqdm(...) instead. 
If set, will attempt to use\n matplotlib animations for a graphical output [default: False].\n\n Returns\n -------\n out : decorated iterator.\n \"\"\"\n\n monitor_interval = 10 # set to 0 to disable the thread\n monitor = None\n _instances = WeakSet()\n\n @staticmethod\n def format_sizeof(num, suffix='', divisor=1000):\n \"\"\"\n Formats a number (greater than unity) with SI Order of Magnitude\n prefixes.\n\n Parameters\n ----------\n num : float\n Number ( >= 1) to format.\n suffix : str, optional\n Post-postfix [default: ''].\n divisor : float, optional\n Divisor between prefixes [default: 1000].\n\n Returns\n -------\n out : str\n Number with Order of Magnitude SI unit postfix.\n \"\"\"\n for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 999.5:\n if abs(num) < 99.95:\n if abs(num) < 9.995:\n return '{0:1.2f}'.format(num) + unit + suffix\n return '{0:2.1f}'.format(num) + unit + suffix\n return '{0:3.0f}'.format(num) + unit + suffix\n num /= divisor\n return '{0:3.1f}Y'.format(num) + suffix\n\n @staticmethod\n def format_interval(t):\n \"\"\"\n Formats a number of seconds as a clock time, [H:]MM:SS\n\n Parameters\n ----------\n t : int\n Number of seconds.\n\n Returns\n -------\n out : str\n [H:]MM:SS\n \"\"\"\n mins, s = divmod(int(t), 60)\n h, m = divmod(mins, 60)\n if h:\n return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)\n else:\n return '{0:02d}:{1:02d}'.format(m, s)\n\n @staticmethod\n def format_num(n):\n \"\"\"\n Intelligent scientific notation (.3g).\n\n Parameters\n ----------\n n : int or float or Numeric\n A Number.\n\n Returns\n -------\n out : str\n Formatted number.\n \"\"\"\n f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')\n n = str(n)\n return f if len(f) < len(n) else n\n\n @staticmethod\n def status_printer(file):\n \"\"\"\n Manage the printing and in-place updating of a line of characters.\n Note that if the string is longer than a line, then in-place\n updating may not work (it will print a new line at each refresh).\n \"\"\"\n fp = file\n fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover\n if fp in (sys.stderr, sys.stdout):\n getattr(sys.stderr, 'flush', lambda: None)()\n getattr(sys.stdout, 'flush', lambda: None)()\n\n def fp_write(s):\n fp.write(str(s))\n fp_flush()\n\n last_len = [0]\n\n def print_status(s):\n len_s = disp_len(s)\n fp_write('\\r' + s + (' ' * max(last_len[0] - len_s, 0)))\n last_len[0] = len_s\n\n return print_status\n\n @staticmethod\n def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',\n unit_scale=False, rate=None, bar_format=None, postfix=None,\n unit_divisor=1000, initial=0, colour=None, **extra_kwargs):\n \"\"\"\n Return a string-based progress bar given some parameters\n\n Parameters\n ----------\n n : int or float\n Number of finished iterations.\n total : int or float\n The expected total number of iterations. If meaningless (None),\n only basic progress statistics are displayed (no ETA).\n elapsed : float\n Number of seconds passed since start.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes `{bar}` to stay within this bound\n [default: None]. If `0`, will not print any bar (only stats).\n The fallback is `{bar:10}`.\n prefix : str, optional\n Prefix message (included in total width) [default: ''].\n Use as {desc} in bar_format string.\n ascii : bool, optional or str, optional\n If not set, use unicode (smooth blocks) to fill the meter\n [default: False]. 
The fallback is to use ASCII characters\n \" 123456789#\".\n unit : str, optional\n The iteration unit [default: 'it'].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be printed with an\n appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)\n [default: False]. If any other non-zero number, will scale\n `total` and `n`.\n rate : float, optional\n Manual override for iteration rate.\n If [default: None], uses n/elapsed.\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n postfix : *, optional\n Similar to `prefix`, but placed at the end\n (e.g. for additional stats).\n Note: postfix is usually a string (not a dict) for this method,\n and will if possible be set to postfix = ', ' + postfix.\n However other types are supported (#382).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n initial : int or float, optional\n The initial counter value [default: 0].\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n\n Returns\n -------\n out : Formatted meter and stats, ready to display.\n \"\"\"\n\n # sanity check: total\n if total and n >= (total + 0.5): # allow float imprecision (#849)\n total = None\n\n # apply custom scale if necessary\n if unit_scale and unit_scale not in (True, 1):\n if total:\n total *= unit_scale\n n *= unit_scale\n if rate:\n rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt\n unit_scale = False\n\n elapsed_str = tqdm.format_interval(elapsed)\n\n # if unspecified, attempt to use rate = average speed\n # (we allow manual override since predicting time is an arcane art)\n if rate is None and elapsed:\n rate = (n - initial) / elapsed\n inv_rate = 1 / rate if rate else None\n format_sizeof = tqdm.format_sizeof\n rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else\n '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'\n rate_inv_fmt = (\n (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))\n if inv_rate else '?') + 's/' + unit\n rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt\n\n if unit_scale:\n n_fmt = format_sizeof(n, divisor=unit_divisor)\n total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'\n else:\n n_fmt = str(n)\n total_fmt = str(total) if total is not None else '?'\n\n try:\n postfix = ', ' + postfix if postfix else ''\n except TypeError:\n pass\n\n remaining = (total - n) / rate if rate and total else 0\n remaining_str = tqdm.format_interval(remaining) if rate else '?'\n try:\n eta_dt = (datetime.now() + timedelta(seconds=remaining)\n if rate and total else datetime.utcfromtimestamp(0))\n except OverflowError:\n eta_dt = datetime.max\n\n # format the stats displayed to the left and right sides of the bar\n if prefix:\n # old prefix setup work around\n bool_prefix_colon_already = (prefix[-2:] == \": \")\n l_bar = prefix if bool_prefix_colon_already else prefix + \": \"\n else:\n l_bar = ''\n\n r_bar = f'| 
{n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'\n\n # Custom bar formatting\n # Populate a dict with all available progress indicators\n format_dict = {\n # slight extension of self.format_dict\n 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,\n 'elapsed': elapsed_str, 'elapsed_s': elapsed,\n 'ncols': ncols, 'desc': prefix or '', 'unit': unit,\n 'rate': inv_rate if inv_rate and inv_rate > 1 else rate,\n 'rate_fmt': rate_fmt, 'rate_noinv': rate,\n 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,\n 'rate_inv_fmt': rate_inv_fmt,\n 'postfix': postfix, 'unit_divisor': unit_divisor,\n 'colour': colour,\n # plus more useful definitions\n 'remaining': remaining_str, 'remaining_s': remaining,\n 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,\n **extra_kwargs}\n\n # total is known: we can predict some stats\n if total:\n # fractional and percentage progress\n frac = n / total\n percentage = frac * 100\n\n l_bar += '{0:3.0f}%|'.format(percentage)\n\n if ncols == 0:\n return l_bar[:-1] + r_bar[1:]\n\n format_dict.update(l_bar=l_bar)\n if bar_format:\n format_dict.update(percentage=percentage)\n\n # auto-remove colon for empty `{desc}`\n if not prefix:\n bar_format = bar_format.replace(\"{desc}: \", '')\n else:\n bar_format = \"{l_bar}{bar}{r_bar}\"\n\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar # no `{bar}`; nothing else to do\n\n # Formatting progress bar space available for bar's display\n full_bar = Bar(frac,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,\n colour=colour)\n if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):\n bar_format = str(bar_format)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n\n elif bar_format:\n # user-specified bar_format but no total\n l_bar += '|'\n format_dict.update(l_bar=l_bar, percentage=0)\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar\n full_bar = Bar(0,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.BLANK, colour=colour)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n else:\n # no total: no progressbar, ETA, just progress stats\n return (f'{(prefix + \": \") if prefix else \"\"}'\n f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')\n\n def __new__(cls, *_, **__):\n instance = object.__new__(cls)\n with cls.get_lock(): # also constructs lock if non-existent\n cls._instances.add(instance)\n # create monitoring thread\n if cls.monitor_interval and (cls.monitor is None\n or not cls.monitor.report()):\n try:\n cls.monitor = TMonitor(cls, cls.monitor_interval)\n except Exception as e: # pragma: nocover\n warn(\"tqdm:disabling monitor support\"\n \" (monitor_interval = 0) due to:\\n\" + str(e),\n TqdmMonitorWarning, stacklevel=2)\n cls.monitor_interval = 0\n return instance\n\n @classmethod\n def _get_free_pos(cls, instance=None):\n \"\"\"Skips specified instance.\"\"\"\n positions = {abs(inst.pos) for inst in cls._instances\n if inst is not instance and hasattr(inst, \"pos\")}\n return min(set(range(len(positions) + 1)).difference(positions))\n\n @classmethod\n def _decr_instances(cls, instance):\n \"\"\"\n Remove from list and reposition another unfixed bar\n to fill the new gap.\n\n This means that by default (where all nested bars are 
unfixed),\n order is not maintained but screen flicker/blank space is minimised.\n (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)\n \"\"\"\n with cls._lock:\n try:\n cls._instances.remove(instance)\n except KeyError:\n # if not instance.gui: # pragma: no cover\n # raise\n pass # py2: maybe magically removed already\n # else:\n if not instance.gui:\n last = (instance.nrows or 20) - 1\n # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)\n instances = list(filter(\n lambda i: hasattr(i, \"pos\") and last <= i.pos,\n cls._instances))\n # set first found to current `pos`\n if instances:\n inst = min(instances, key=lambda i: i.pos)\n inst.clear(nolock=True)\n inst.pos = abs(instance.pos)\n\n @classmethod\n def write(cls, s, file=None, end=\"\\n\", nolock=False):\n \"\"\"Print a message via tqdm (without overlap with bars).\"\"\"\n fp = file if file is not None else sys.stdout\n with cls.external_write_mode(file=file, nolock=nolock):\n # Write the message\n fp.write(s)\n fp.write(end)\n\n @classmethod\n @contextmanager\n def external_write_mode(cls, file=None, nolock=False):\n \"\"\"\n Disable tqdm within context and refresh tqdm when exits.\n Useful when writing to standard output stream\n \"\"\"\n fp = file if file is not None else sys.stdout\n\n try:\n if not nolock:\n cls.get_lock().acquire()\n # Clear all bars\n inst_cleared = []\n for inst in getattr(cls, '_instances', []):\n # Clear instance if in the target output file\n # or if write output + tqdm output are both either\n # sys.stdout or sys.stderr (because both are mixed in terminal)\n if hasattr(inst, \"start_t\") and (inst.fp == fp or all(\n f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):\n inst.clear(nolock=True)\n inst_cleared.append(inst)\n yield\n # Force refresh display of bars we cleared\n for inst in inst_cleared:\n inst.refresh(nolock=True)\n finally:\n if not nolock:\n cls._lock.release()\n\n @classmethod\n def set_lock(cls, lock):\n \"\"\"Set the global lock.\"\"\"\n cls._lock = lock\n\n @classmethod\n def get_lock(cls):\n \"\"\"Get the global lock. 
Construct it if it does not exist.\"\"\"\n if not hasattr(cls, '_lock'):\n cls._lock = TqdmDefaultWriteLock()\n return cls._lock\n\n @classmethod\n def pandas(cls, **tqdm_kwargs):\n \"\"\"\n Registers the current `tqdm` class with\n pandas.core.\n ( frame.DataFrame\n | series.Series\n | groupby.(generic.)DataFrameGroupBy\n | groupby.(generic.)SeriesGroupBy\n ).progress_apply\n\n A new instance will be created every time `progress_apply` is called,\n and each instance will automatically `close()` upon completion.\n\n Parameters\n ----------\n tqdm_kwargs : arguments for the tqdm instance\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> from tqdm import tqdm\n >>> from tqdm.gui import tqdm as tqdm_gui\n >>>\n >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))\n >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc\n >>> # Now you can use `progress_apply` instead of `apply`\n >>> df.groupby(0).progress_apply(lambda x: x**2)\n\n References\n ----------\n <https://stackoverflow.com/questions/18603270/\\\n progress-indicator-during-pandas-operations-python>\n \"\"\"\n from warnings import catch_warnings, simplefilter\n\n from pandas.core.frame import DataFrame\n from pandas.core.series import Series\n try:\n with catch_warnings():\n simplefilter(\"ignore\", category=FutureWarning)\n from pandas import Panel\n except ImportError: # pandas>=1.2.0\n Panel = None\n Rolling, Expanding = None, None\n try: # pandas>=1.0.0\n from pandas.core.window.rolling import _Rolling_and_Expanding\n except ImportError:\n try: # pandas>=0.18.0\n from pandas.core.window import _Rolling_and_Expanding\n except ImportError: # pandas>=1.2.0\n try: # pandas>=1.2.0\n from pandas.core.window.expanding import Expanding\n from pandas.core.window.rolling import Rolling\n _Rolling_and_Expanding = Rolling, Expanding\n except ImportError: # pragma: no cover\n _Rolling_and_Expanding = None\n try: # pandas>=0.25.0\n from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy\n from pandas.core.groupby.generic import DataFrameGroupBy\n except ImportError: # pragma: no cover\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy\n except ImportError:\n from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import GroupBy\n except ImportError: # pragma: no cover\n from pandas.core.groupby import GroupBy\n\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import PanelGroupBy\n except ImportError:\n try:\n from pandas.core.groupby import PanelGroupBy\n except ImportError: # pandas>=0.25.0\n PanelGroupBy = None\n\n tqdm_kwargs = tqdm_kwargs.copy()\n deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]\n\n def inner_generator(df_function='apply'):\n def inner(df, func, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n df : (DataFrame|Series)[GroupBy]\n Data (may be grouped).\n func : function\n To be applied on the (grouped) data.\n **kwargs : optional\n Transmitted to `df.apply()`.\n \"\"\"\n\n # Precompute total iterations\n total = tqdm_kwargs.pop(\"total\", getattr(df, 'ngroups', None))\n if total is None: # not grouped\n if df_function == 'applymap':\n total = df.size\n elif isinstance(df, Series):\n total = len(df)\n elif (_Rolling_and_Expanding is None or\n not isinstance(df, _Rolling_and_Expanding)):\n # DataFrame or Panel\n axis = kwargs.get('axis', 0)\n if axis == 'index':\n axis = 0\n elif axis == 'columns':\n axis = 1\n # when axis=0, 
total is shape[axis1]\n total = df.size // df.shape[axis]\n\n # Init bar\n if deprecated_t[0] is not None:\n t = deprecated_t[0]\n deprecated_t[0] = None\n else:\n t = cls(total=total, **tqdm_kwargs)\n\n if len(args) > 0:\n # *args intentionally not supported (see #244, #299)\n TqdmDeprecationWarning(\n \"Except func, normal arguments are intentionally\" +\n \" not supported by\" +\n \" `(DataFrame|Series|GroupBy).progress_apply`.\" +\n \" Use keyword arguments instead.\",\n fp_write=getattr(t.fp, 'write', sys.stderr.write))\n\n try: # pandas>=1.3.0\n from pandas.core.common import is_builtin_func\n except ImportError:\n is_builtin_func = df._is_builtin_func\n try:\n func = is_builtin_func(func)\n except TypeError:\n pass\n\n # Define bar updating wrapper\n def wrapper(*args, **kwargs):\n # update tbar correctly\n # it seems `pandas apply` calls `func` twice\n # on the first column/row to decide whether it can\n # take a fast or slow code path; so stop when t.total==t.n\n t.update(n=1 if not t.total or t.n < t.total else 0)\n return func(*args, **kwargs)\n\n # Apply the provided function (in **kwargs)\n # on the df using our wrapper (which provides bar updating)\n try:\n return getattr(df, df_function)(wrapper, **kwargs)\n finally:\n t.close()\n\n return inner\n\n # Monkeypatch pandas to provide easy methods\n # Enable custom tqdm progress in pandas!\n Series.progress_apply = inner_generator()\n SeriesGroupBy.progress_apply = inner_generator()\n Series.progress_map = inner_generator('map')\n SeriesGroupBy.progress_map = inner_generator('map')\n\n DataFrame.progress_apply = inner_generator()\n DataFrameGroupBy.progress_apply = inner_generator()\n DataFrame.progress_applymap = inner_generator('applymap')\n\n if Panel is not None:\n Panel.progress_apply = inner_generator()\n if PanelGroupBy is not None:\n PanelGroupBy.progress_apply = inner_generator()\n\n GroupBy.progress_apply = inner_generator()\n GroupBy.progress_aggregate = inner_generator('aggregate')\n GroupBy.progress_transform = inner_generator('transform')\n\n if Rolling is not None and Expanding is not None:\n Rolling.progress_apply = inner_generator()\n Expanding.progress_apply = inner_generator()\n elif _Rolling_and_Expanding is not None:\n _Rolling_and_Expanding.progress_apply = inner_generator()\n\n # override defaults via env vars\n @envwrap(\"TQDM_\", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,\n 'position': int, 'nrows': int})\n def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,\n ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,\n ascii=None, disable=False, unit='it', unit_scale=False,\n dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,\n position=None, postfix=None, unit_divisor=1000, write_bytes=False,\n lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,\n **kwargs):\n \"\"\"see tqdm.tqdm for arguments\"\"\"\n if file is None:\n file = sys.stderr\n\n if write_bytes:\n # Despite coercing unicode into bytes, py2 sys.std* streams\n # should have bytes written to them.\n file = SimpleTextIOWrapper(\n file, encoding=getattr(file, 'encoding', None) or 'utf-8')\n\n file = DisableOnWriteError(file, tqdm_instance=self)\n\n if disable is None and hasattr(file, \"isatty\") and not file.isatty():\n disable = True\n\n if total is None and iterable is not None:\n try:\n total = len(iterable)\n except (TypeError, AttributeError):\n total = None\n if total == float(\"inf\"):\n # Infinite iterations, behave same as unknown\n total = None\n\n if 
disable:\n self.iterable = iterable\n self.disable = disable\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n self.n = initial\n self.total = total\n self.leave = leave\n return\n\n if kwargs:\n self.disable = True\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n raise (\n TqdmDeprecationWarning(\n \"`nested` is deprecated and automated.\\n\"\n \"Use `position` instead for manual control.\\n\",\n fp_write=getattr(file, 'write', sys.stderr.write))\n if \"nested\" in kwargs else\n TqdmKeyError(\"Unknown argument(s): \" + str(kwargs)))\n\n # Preprocess the arguments\n if (\n (ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))\n ) or dynamic_ncols: # pragma: no cover\n if dynamic_ncols:\n dynamic_ncols = _screen_shape_wrapper()\n if dynamic_ncols:\n ncols, nrows = dynamic_ncols(file)\n else:\n _dynamic_ncols = _screen_shape_wrapper()\n if _dynamic_ncols:\n _ncols, _nrows = _dynamic_ncols(file)\n if ncols is None:\n ncols = _ncols\n if nrows is None:\n nrows = _nrows\n\n if miniters is None:\n miniters = 0\n dynamic_miniters = True\n else:\n dynamic_miniters = False\n\n if mininterval is None:\n mininterval = 0\n\n if maxinterval is None:\n maxinterval = 0\n\n if ascii is None:\n ascii = not _supports_unicode(file)\n\n if bar_format and ascii is not True and not _is_ascii(ascii):\n # Convert bar format into unicode since terminal uses unicode\n bar_format = str(bar_format)\n\n if smoothing is None:\n smoothing = 0\n\n # Store the arguments\n self.iterable = iterable\n self.desc = desc or ''\n self.total = total\n self.leave = leave\n self.fp = file\n self.ncols = ncols\n self.nrows = nrows\n self.mininterval = mininterval\n self.maxinterval = maxinterval\n self.miniters = miniters\n self.dynamic_miniters = dynamic_miniters\n self.ascii = ascii\n self.disable = disable\n self.unit = unit\n self.unit_scale = unit_scale\n self.unit_divisor = unit_divisor\n self.initial = initial\n self.lock_args = lock_args\n self.delay = delay\n self.gui = gui\n self.dynamic_ncols = dynamic_ncols\n self.smoothing = smoothing\n self._ema_dn = EMA(smoothing)\n self._ema_dt = EMA(smoothing)\n self._ema_miniters = EMA(smoothing)\n self.bar_format = bar_format\n self.postfix = None\n self.colour = colour\n self._time = time\n if postfix:\n try:\n self.set_postfix(refresh=False, **postfix)\n except TypeError:\n self.postfix = postfix\n\n # Init the iterations counters\n self.last_print_n = initial\n self.n = initial\n\n # if nested, at initial sp() call we replace '\\r' by '\\n' to\n # not overwrite the outer progress bar\n with self._lock:\n # mark fixed positions as negative\n self.pos = self._get_free_pos(self) if position is None else -position\n\n if not gui:\n # Initialize the screen printer\n self.sp = self.status_printer(self.fp)\n if delay <= 0:\n self.refresh(lock_args=self.lock_args)\n\n # Init the time counter\n self.last_print_t = self._time()\n # NB: Avoid race conditions by setting start_t at the very end of init\n self.start_t = self.last_print_t\n\n def __bool__(self):\n if self.total is not None:\n return self.total > 0\n if self.iterable is None:\n raise TypeError('bool() undefined when iterable == total == None')\n return bool(self.iterable)\n\n def __len__(self):\n return (\n self.total if self.iterable is None\n else self.iterable.shape[0] if hasattr(self.iterable, \"shape\")\n else len(self.iterable) if hasattr(self.iterable, \"__len__\")\n else self.iterable.__length_hint__() if 
hasattr(self.iterable, \"__length_hint__\")\n else getattr(self, \"total\", None))\n\n def __reversed__(self):\n try:\n orig = self.iterable\n except AttributeError:\n raise TypeError(\"'tqdm' object is not reversible\")\n else:\n self.iterable = reversed(self.iterable)\n return self.__iter__()\n finally:\n self.iterable = orig\n\n def __contains__(self, item):\n contains = getattr(self.iterable, '__contains__', None)\n return contains(item) if contains is not None else item in self.__iter__()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n self.close()\n except AttributeError:\n # maybe eager thread cleanup upon external error\n if (exc_type, exc_value, traceback) == (None, None, None):\n raise\n warn(\"AttributeError ignored\", TqdmWarning, stacklevel=2)\n\n def __del__(self):\n self.close()\n\n def __str__(self):\n return self.format_meter(**self.format_dict)\n\n @property\n def _comparable(self):\n return abs(getattr(self, \"pos\", 1 << 31))\n\n def __hash__(self):\n return id(self)\n\n def __iter__(self):\n \"\"\"Backward-compatibility to use: for x in tqdm(iterable)\"\"\"\n\n # Inlining instance variables as locals (speed optimisation)\n iterable = self.iterable\n\n # If the bar is disabled, then just walk the iterable\n # (note: keep this check outside the loop for performance)\n if self.disable:\n for obj in iterable:\n yield obj\n return\n\n mininterval = self.mininterval\n last_print_t = self.last_print_t\n last_print_n = self.last_print_n\n min_start_t = self.start_t + self.delay\n n = self.n\n time = self._time\n\n try:\n for obj in iterable:\n yield obj\n # Update and possibly print the progressbar.\n # Note: does not call self.update(1) for speed optimisation.\n n += 1\n\n if n - last_print_n >= self.miniters:\n cur_t = time()\n dt = cur_t - last_print_t\n if dt >= mininterval and cur_t >= min_start_t:\n self.update(n - last_print_n)\n last_print_n = self.last_print_n\n last_print_t = self.last_print_t\n finally:\n self.n = n\n self.close()\n\n def update(self, n=1):\n \"\"\"\n Manually update the progress bar, useful for streams\n such as reading files.\n E.g.:\n >>> t = tqdm(total=filesize) # Initialise\n >>> for current_buffer in stream:\n ... ...\n ... t.update(len(current_buffer))\n >>> t.close()\n The last line is highly recommended, but possibly not necessary if\n `t.update()` will be called in such a way that `filesize` will be\n exactly reached and printed.\n\n Parameters\n ----------\n n : int or float, optional\n Increment to add to the internal counter of iterations\n [default: 1]. 
If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n\n Returns\n -------\n out : bool or None\n True if a `display()` was triggered.\n \"\"\"\n if self.disable:\n return\n\n if n < 0:\n self.last_print_n += n # for auto-refresh logic to work\n self.n += n\n\n # check counter first to reduce calls to time()\n if self.n - self.last_print_n >= self.miniters:\n cur_t = self._time()\n dt = cur_t - self.last_print_t\n if dt >= self.mininterval and cur_t >= self.start_t + self.delay:\n cur_t = self._time()\n dn = self.n - self.last_print_n # >= n\n if self.smoothing and dt and dn:\n # EMA (not just overall average)\n self._ema_dn(dn)\n self._ema_dt(dt)\n self.refresh(lock_args=self.lock_args)\n if self.dynamic_miniters:\n # If no `miniters` was specified, adjust automatically to the\n # maximum iteration rate seen so far between two prints.\n # e.g.: After running `tqdm.update(5)`, subsequent\n # calls to `tqdm.update()` will only cause an update after\n # at least 5 more iterations.\n if self.maxinterval and dt >= self.maxinterval:\n self.miniters = dn * (self.mininterval or self.maxinterval) / dt\n elif self.smoothing:\n # EMA miniters update\n self.miniters = self._ema_miniters(\n dn * (self.mininterval / dt if self.mininterval and dt\n else 1))\n else:\n # max iters between two prints\n self.miniters = max(self.miniters, dn)\n\n # Store old values for next call\n self.last_print_n = self.n\n self.last_print_t = cur_t\n return True\n\n def close(self):\n \"\"\"Cleanup and (if leave=False) close the progressbar.\"\"\"\n if self.disable:\n return\n\n # Prevent multiple closures\n self.disable = True\n\n # decrement instance pos and remove from internal set\n pos = abs(self.pos)\n self._decr_instances(self)\n\n if self.last_print_t < self.start_t + self.delay:\n # haven't ever displayed; nothing to clear\n return\n\n # GUI mode\n if getattr(self, 'sp', None) is None:\n return\n\n # annoyingly, _supports_unicode isn't good enough\n def fp_write(s):\n self.fp.write(str(s))\n\n try:\n fp_write('')\n except ValueError as e:\n if 'closed' in str(e):\n return\n raise # pragma: no cover\n\n leave = pos == 0 if self.leave is None else self.leave\n\n with self._lock:\n if leave:\n # stats for overall rate (no weighted average)\n self._ema_dt = lambda: None\n self.display(pos=0)\n fp_write('\\n')\n else:\n # clear previous display\n if self.display(msg='', pos=pos) and not pos:\n fp_write('\\r')\n\n def clear(self, nolock=False):\n \"\"\"Clear current bar display.\"\"\"\n if self.disable:\n return\n\n if not nolock:\n self._lock.acquire()\n pos = abs(self.pos)\n if pos < (self.nrows or 20):\n self.moveto(pos)\n self.sp('')\n self.fp.write('\\r') # place cursor back at the beginning of line\n self.moveto(-pos)\n if not nolock:\n self._lock.release()\n\n def refresh(self, nolock=False, lock_args=None):\n \"\"\"\n Force refresh the display of this bar.\n\n Parameters\n ----------\n nolock : bool, optional\n If `True`, does not lock.\n If [default: `False`]: calls `acquire()` on internal lock.\n lock_args : tuple, optional\n Passed to internal lock's `acquire()`.\n If specified, will only `display()` if `acquire()` returns `True`.\n \"\"\"\n if self.disable:\n return\n\n if not nolock:\n if lock_args:\n if not self._lock.acquire(*lock_args):\n return False\n else:\n self._lock.acquire()\n self.display()\n if not nolock:\n self._lock.release()\n return True\n\n def unpause(self):\n \"\"\"Restart tqdm timer from last print time.\"\"\"\n if self.disable:\n 
return\n cur_t = self._time()\n self.start_t += cur_t - self.last_print_t\n self.last_print_t = cur_t\n\n def reset(self, total=None):\n \"\"\"\n Resets to 0 iterations for repeated use.\n\n Consider combining with `leave=True`.\n\n Parameters\n ----------\n total : int or float, optional. Total to use for the new bar.\n \"\"\"\n self.n = 0\n if total is not None:\n self.total = total\n if self.disable:\n return\n self.last_print_n = 0\n self.last_print_t = self.start_t = self._time()\n self._ema_dn = EMA(self.smoothing)\n self._ema_dt = EMA(self.smoothing)\n self._ema_miniters = EMA(self.smoothing)\n self.refresh()\n\n def set_description(self, desc=None, refresh=True):\n \"\"\"\n Set/modify description of the progress bar.\n\n Parameters\n ----------\n desc : str, optional\n refresh : bool, optional\n Forces refresh [default: True].\n \"\"\"\n self.desc = desc + ': ' if desc else ''\n if refresh:\n self.refresh()\n\n def set_description_str(self, desc=None, refresh=True):\n \"\"\"Set/modify description without ': ' appended.\"\"\"\n self.desc = desc or ''\n if refresh:\n self.refresh()\n\n def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):\n \"\"\"\n Set/modify postfix (additional stats)\n with automatic formatting based on datatype.\n\n Parameters\n ----------\n ordered_dict : dict or OrderedDict, optional\n refresh : bool, optional\n Forces refresh [default: True].\n kwargs : dict, optional\n \"\"\"\n # Sort in alphabetical order to be more deterministic\n postfix = OrderedDict([] if ordered_dict is None else ordered_dict)\n for key in sorted(kwargs.keys()):\n postfix[key] = kwargs[key]\n # Preprocess stats according to datatype\n for key in postfix.keys():\n # Number: limit the length of the string\n if isinstance(postfix[key], Number):\n postfix[key] = self.format_num(postfix[key])\n # Else for any other type, try to get the string conversion\n elif not isinstance(postfix[key], str):\n postfix[key] = str(postfix[key])\n # Else if it's a string, don't need to preprocess anything\n # Stitch together to get the final postfix\n self.postfix = ', '.join(key + '=' + postfix[key].strip()\n for key in postfix.keys())\n if refresh:\n self.refresh()\n\n def set_postfix_str(self, s='', refresh=True):\n \"\"\"\n Postfix without dictionary expansion, similar to prefix handling.\n \"\"\"\n self.postfix = str(s)\n if refresh:\n self.refresh()\n\n def moveto(self, n):\n # TODO: private method\n self.fp.write('\\n' * n + _term_move_up() * -n)\n getattr(self.fp, 'flush', lambda: None)()\n\n @property\n def format_dict(self):\n \"\"\"Public API for read-only member access.\"\"\"\n if self.disable and not hasattr(self, 'unit'):\n return defaultdict(lambda: None, {\n 'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})\n if self.dynamic_ncols:\n self.ncols, self.nrows = self.dynamic_ncols(self.fp)\n return {\n 'n': self.n, 'total': self.total,\n 'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,\n 'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,\n 'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,\n 'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,\n 'bar_format': self.bar_format, 'postfix': self.postfix,\n 'unit_divisor': self.unit_divisor, 'initial': self.initial,\n 'colour': self.colour}\n\n def display(self, msg=None, pos=None):\n \"\"\"\n Use `self.sp` to display `msg` in the specified `pos`.\n\n Consider overloading this function when inheriting to use e.g.:\n 
`self.some_frontend(**self.format_dict)` instead of `self.sp`.\n\n Parameters\n ----------\n msg : str, optional. What to display (default: `repr(self)`).\n pos : int, optional. Position to `moveto`\n (default: `abs(self.pos)`).\n \"\"\"\n if pos is None:\n pos = abs(self.pos)\n\n nrows = self.nrows or 20\n if pos >= nrows - 1:\n if pos >= nrows:\n return False\n if msg or msg is None: # override at `nrows - 1`\n msg = \" ... (more hidden) ...\"\n\n if not hasattr(self, \"sp\"):\n raise TqdmDeprecationWarning(\n \"Please use `tqdm.gui.tqdm(...)`\"\n \" instead of `tqdm(..., gui=True)`\\n\",\n fp_write=getattr(self.fp, 'write', sys.stderr.write))\n\n if pos:\n self.moveto(pos)\n self.sp(self.__str__() if msg is None else msg)\n if pos:\n self.moveto(-pos)\n return True\n\n @classmethod\n @contextmanager\n def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):\n \"\"\"\n stream : file-like object.\n method : str, \"read\" or \"write\". The result of `read()` and\n the first argument of `write()` should have a `len()`.\n\n >>> with tqdm.wrapattr(file_obj, \"read\", total=file_obj.size) as fobj:\n ... while True:\n ... chunk = fobj.read(chunk_size)\n ... if not chunk:\n ... break\n \"\"\"\n with cls(total=total, **tqdm_kwargs) as t:\n if bytes:\n t.unit = \"B\"\n t.unit_scale = True\n t.unit_divisor = 1024\n yield CallbackIOWrapper(t.update, stream, method)" }, { "identifier": "__version__", "path": "venv/Lib/site-packages/tqdm/version.py", "snippet": "" } ]
import logging
import re
import sys
from ast import literal_eval as numeric
from .std import TqdmKeyError, TqdmTypeError, tqdm
from .version import __version__
from importlib import resources
from os import path
from shutil import copyfile
14,060
if not tmp: if buf: fp_write(buf) if callback_len: # n += 1 + buf.count(delim) callback(1 + buf.count(delim)) else: for i in buf.split(delim): callback(i) getattr(fout, 'flush', lambda: None)() return # n while True: i = tmp.find(delim) if i < 0: buf += tmp break fp_write(buf + tmp[:i + len(delim)]) # n += 1 callback(1 if callback_len else (buf + tmp[:i])) buf = b'' tmp = tmp[i + len_delim:] # ((opt, type), ... ) RE_OPTS = re.compile(r'\n {4}(\S+)\s{2,}:\s*([^,]+)') # better split method assuming no positional args RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(\s+|=|$)') # TODO: add custom support for some of the following? UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file') # The 8 leading spaces are required for consistency CLI_EXTRA_DOC = r""" Extra CLI Options ----------------- name : type, optional TODO: find out why this is needed. delim : chr, optional Delimiting character [default: '\n']. Use '\0' for null. N.B.: on Windows systems, Python converts '\n' to '\r\n'. buf_size : int, optional String buffer size in bytes [default: 256] used when `delim` is specified. bytes : bool, optional If true, will count bytes, ignore `delim`, and default `unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'. tee : bool, optional If true, passes `stdin` to both `stderr` and `stdout`. update : bool, optional If true, will treat input as newly elapsed iterations, i.e. numbers to pass to `update()`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. update_to : bool, optional If true, will treat input as total elapsed iterations, i.e. numbers to assign to `self.n`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. null : bool, optional If true, will discard input (no stdout). manpath : str, optional Directory in which to install tqdm man pages. comppath : str, optional Directory in which to place tqdm completion. log : str, optional CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET. """ def main(fp=sys.stderr, argv=None): """ Parameters (internal use only) --------- fp : file-like object for tqdm argv : list (default: sys.argv[1:]) """ if argv is None: argv = sys.argv[1:] try: log_idx = argv.index('--log') except ValueError: for i in argv: if i.startswith('--log='): logLevel = i[len('--log='):] break else: logLevel = 'INFO' else: # argv.pop(log_idx) # logLevel = argv.pop(log_idx) logLevel = argv[log_idx + 1] logging.basicConfig(level=getattr(logging, logLevel), format="%(levelname)s:%(module)s:%(lineno)d:%(message)s") d = tqdm.__doc__ + CLI_EXTRA_DOC opt_types = dict(RE_OPTS.findall(d)) # opt_types['delim'] = 'chr' for o in UNSUPPORTED_OPTS: opt_types.pop(o) log.debug(sorted(opt_types.items())) # d = RE_OPTS.sub(r' --\1=<\1> : \2', d) split = RE_OPTS.split(d) opt_types_desc = zip(split[1::3], split[2::3], split[3::3]) d = ''.join(('\n --{0} : {2}{3}' if otd[1] == 'bool' else '\n --{0}=<{1}> : {2}{3}').format( otd[0].replace('_', '-'), otd[0], *otd[1:]) for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS) help_short = "Usage:\n tqdm [--help | options]\n" d = help_short + """ Options: -h, --help Print this help and exit. -v, --version Print version and exit. """ + d.strip('\n') + '\n' # opts = docopt(d, version=__version__) if any(v in argv for v in ('-v', '--version')):
""" Module version for monitoring CLI pipes (`... | python -m tqdm | ...`). """ __all__ = ["main"] log = logging.getLogger(__name__) def cast(val, typ): log.debug((val, typ)) if " or " in typ: for t in typ.split(" or "): try: return cast(val, t) except TqdmTypeError: pass raise TqdmTypeError(val + ' : ' + typ) # sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n') if typ == 'bool': if (val == 'True') or (val == ''): return True elif val == 'False': return False else: raise TqdmTypeError(val + ' : ' + typ) try: return eval(typ + '("' + val + '")') except Exception: if typ == 'chr': return chr(ord(eval('"' + val + '"'))).encode() else: raise TqdmTypeError(val + ' : ' + typ) def posix_pipe(fin, fout, delim=b'\\n', buf_size=256, callback=lambda float: None, callback_len=True): """ Params ------ fin : binary file with `read(buf_size : int)` method fout : binary file with `write` (and optionally `flush`) methods. callback : function(float), e.g.: `tqdm.update` callback_len : If (default: True) do `callback(len(buffer))`. Otherwise, do `callback(data) for data in buffer.split(delim)`. """ fp_write = fout.write if not delim: while True: tmp = fin.read(buf_size) # flush at EOF if not tmp: getattr(fout, 'flush', lambda: None)() return fp_write(tmp) callback(len(tmp)) # return buf = b'' len_delim = len(delim) # n = 0 while True: tmp = fin.read(buf_size) # flush at EOF if not tmp: if buf: fp_write(buf) if callback_len: # n += 1 + buf.count(delim) callback(1 + buf.count(delim)) else: for i in buf.split(delim): callback(i) getattr(fout, 'flush', lambda: None)() return # n while True: i = tmp.find(delim) if i < 0: buf += tmp break fp_write(buf + tmp[:i + len(delim)]) # n += 1 callback(1 if callback_len else (buf + tmp[:i])) buf = b'' tmp = tmp[i + len_delim:] # ((opt, type), ... ) RE_OPTS = re.compile(r'\n {4}(\S+)\s{2,}:\s*([^,]+)') # better split method assuming no positional args RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(\s+|=|$)') # TODO: add custom support for some of the following? UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file') # The 8 leading spaces are required for consistency CLI_EXTRA_DOC = r""" Extra CLI Options ----------------- name : type, optional TODO: find out why this is needed. delim : chr, optional Delimiting character [default: '\n']. Use '\0' for null. N.B.: on Windows systems, Python converts '\n' to '\r\n'. buf_size : int, optional String buffer size in bytes [default: 256] used when `delim` is specified. bytes : bool, optional If true, will count bytes, ignore `delim`, and default `unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'. tee : bool, optional If true, passes `stdin` to both `stderr` and `stdout`. update : bool, optional If true, will treat input as newly elapsed iterations, i.e. numbers to pass to `update()`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. update_to : bool, optional If true, will treat input as total elapsed iterations, i.e. numbers to assign to `self.n`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. null : bool, optional If true, will discard input (no stdout). manpath : str, optional Directory in which to install tqdm man pages. comppath : str, optional Directory in which to place tqdm completion. log : str, optional CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET. 
""" def main(fp=sys.stderr, argv=None): """ Parameters (internal use only) --------- fp : file-like object for tqdm argv : list (default: sys.argv[1:]) """ if argv is None: argv = sys.argv[1:] try: log_idx = argv.index('--log') except ValueError: for i in argv: if i.startswith('--log='): logLevel = i[len('--log='):] break else: logLevel = 'INFO' else: # argv.pop(log_idx) # logLevel = argv.pop(log_idx) logLevel = argv[log_idx + 1] logging.basicConfig(level=getattr(logging, logLevel), format="%(levelname)s:%(module)s:%(lineno)d:%(message)s") d = tqdm.__doc__ + CLI_EXTRA_DOC opt_types = dict(RE_OPTS.findall(d)) # opt_types['delim'] = 'chr' for o in UNSUPPORTED_OPTS: opt_types.pop(o) log.debug(sorted(opt_types.items())) # d = RE_OPTS.sub(r' --\1=<\1> : \2', d) split = RE_OPTS.split(d) opt_types_desc = zip(split[1::3], split[2::3], split[3::3]) d = ''.join(('\n --{0} : {2}{3}' if otd[1] == 'bool' else '\n --{0}=<{1}> : {2}{3}').format( otd[0].replace('_', '-'), otd[0], *otd[1:]) for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS) help_short = "Usage:\n tqdm [--help | options]\n" d = help_short + """ Options: -h, --help Print this help and exit. -v, --version Print version and exit. """ + d.strip('\n') + '\n' # opts = docopt(d, version=__version__) if any(v in argv for v in ('-v', '--version')):
sys.stdout.write(__version__ + '\n')
3
2023-12-24 15:46:18+00:00
16k
pkariz/grin-explorer
backend/api/views.py
[ { "identifier": "fetch_and_store_block", "path": "backend/api/bootstrap.py", "snippet": "def fetch_and_store_block(blockchain, block_height, prefetch=True):\n # initialize node api\n node_api = NodeV2API(blockchain.node)\n if block_height < 0:\n # no such block height\n raise NodeBlockNotFoundException()\n if prefetch:\n block_data = get_prefetched_header_and_block_data(blockchain.node, block_height)\n else:\n block_data = node_api.get_block(height=block_height)\n header_data = block_data['header']\n timestamp = parse_datetime(header_data['timestamp'])\n hash = header_data['hash']\n # create header instance\n cuckoo_solution = ','.join(map(str, header_data['cuckoo_solution']))\n with transaction.atomic():\n header, header_created = BlockHeader.objects.get_or_create(\n blockchain=blockchain,\n cuckoo_solution=cuckoo_solution,\n kernel_root=header_data['kernel_root'],\n defaults={\n 'version': header_data['version'],\n 'output_root': header_data['output_root'],\n 'range_proof_root': header_data['range_proof_root'],\n 'kernel_mmr_size': header_data['kernel_mmr_size'],\n 'output_mmr_size': header_data['output_mmr_size'],\n 'nonce': str(header_data['nonce']),\n 'edge_bits': header_data['edge_bits'],\n 'secondary_scaling': header_data['secondary_scaling'],\n 'total_difficulty': header_data['total_difficulty'],\n 'total_kernel_offset': header_data['total_kernel_offset'],\n }\n )\n # create block instance\n try:\n block, block_created = Block.objects.get_or_create(\n blockchain=blockchain,\n hash=hash,\n height=block_height,\n timestamp=timestamp,\n header=header,\n prev_hash=block_data['header']['previous'],\n reorg=None,\n nr_inputs=len(block_data['inputs']),\n nr_outputs=len(block_data['outputs']),\n nr_kernels=len(block_data['kernels']),\n )\n except IntegrityError as e:\n # race condition so it's a duplicate. 
We can skip creation process\n # and just return the block that we already have\n return Block.objects.get(blockchain=blockchain, hash=hash)\n\n if not block_created:\n # we have already fetched all the data since it's done in an atomic\n # transaction, so skip unnecessary work\n return block\n\n # bulk create kernels\n kernels = []\n for kernel_data in block_data['kernels']:\n kernels.append(\n Kernel(\n block=block,\n features=kernel_data['features'],\n fee=kernel_data['fee'],\n fee_shift=kernel_data['fee_shift'],\n lock_height=kernel_data['lock_height'],\n excess=kernel_data['excess'],\n excess_sig=kernel_data['excess_sig'],\n )\n )\n Kernel.objects.bulk_create(kernels)\n\n inputs = []\n # create input instances\n outputs_data = Output.objects\\\n .filter(\n commitment__in=block_data['inputs'],\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\\\n .values('id', 'commitment')\n outputs_mapper = { output_data['commitment'] : output_data['id'] for output_data in outputs_data }\n for input_data in block_data['inputs']:\n inputs.append(\n Input(\n block=block,\n commitment=input_data,\n output_id=outputs_mapper.get(input_data),\n )\n )\n Input.objects.bulk_create(inputs)\n # mark the corresponding outputs as spent, but only on the main chain so\n # that we don't corrupt the reorged data\n Output.objects.filter(pk__in=outputs_mapper.values()).update(spent=True)\n\n # create output instances\n outputs = []\n inputs = Input.objects\\\n .filter(\n commitment__in=list(map(lambda x: x['commit'], block_data['outputs'])),\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\n inputs_mapper = { input.commitment : input for input in inputs }\n for output_data in block_data['outputs']:\n outputs.append(\n Output(\n block=block,\n output_type=output_data['output_type'],\n commitment=output_data['commit'],\n spent=output_data['spent'],\n proof=output_data['proof'],\n proof_hash=output_data['proof_hash'],\n merkle_proof=output_data['merkle_proof'],\n mmr_index=output_data['mmr_index'],\n )\n )\n outputs = Output.objects.bulk_create(outputs)\n # link inputs to created outputs, but only on the main chain so that we\n # don't corrupt the reorged data\n fixed_inputs = []\n for output in outputs:\n matching_input = inputs_mapper.get(output.commitment)\n if matching_input:\n matching_input.output = output\n fixed_inputs.append(matching_input)\n Input.objects.bulk_update(fixed_inputs, ['output'])\n return block" }, { "identifier": "update_blockchain_progress", "path": "backend/api/bootstrap.py", "snippet": "def update_blockchain_progress(blockchain):\n try:\n start_height, end_height = blockchain.get_bootstrap_heights()\n except Exception as e:\n logger.warning(\n 'Failed to get bootstrap heights',\n extra={ 'blockchain': blockchain.slug },\n )\n raise UpdateBlockchainProgressError(blockchain.slug)\n expected_heights = set(range(start_height, end_height + 1))\n existing_heights = set(list(\n blockchain.blocks\\\n .filter(reorg__isnull=True)\\\n .values_list('height', flat=True)\n ))\n missing_heights = expected_heights - existing_heights\n update_load_progress(\n blockchain, \n len(missing_heights),\n end_height - start_height + 1,\n 1,\n 1,\n 2,\n verbose=True\n )" }, { "identifier": "UpdateBlockchainProgressError", "path": "backend/api/exceptions.py", "snippet": "class UpdateBlockchainProgressError(Exception):\n pass" }, { "identifier": "get_filter_backends", "path": "backend/api/helpers.py", "snippet": "def get_filter_backends(replacements):\n \"\"\"\n Returns a tuple of 
filter backends where default ones, from DefaultMixin,\n are replaced with the given replacements.\n\n Args:\n replacements: dict where key is an existing filter backend class's\n __name__ and value is its replacement filter backend class\n \"\"\"\n current_filters = DefaultMixin.filter_backends\n return tuple([\n filter if filter.__name__ not in replacements else replacements[filter.__name__]\n for filter in list(current_filters)\n ])" }, { "identifier": "load_data_from_redis", "path": "backend/api/helpers.py", "snippet": "def load_data_from_redis(redis_key):\n r = redis.Redis(host='redis')\n data = r.get(redis_key)\n if data is None:\n return\n return json.loads(data)" }, { "identifier": "BlockFilter", "path": "backend/api/filters.py", "snippet": "class BlockFilter(filters.FilterSet):\n class Meta:\n model = Block\n fields = ('blockchain', 'height', 'hash')" }, { "identifier": "CustomBlockSearchFilter", "path": "backend/api/filters.py", "snippet": "class CustomBlockSearchFilter(DRFfilters.SearchFilter):\n \"\"\"\n Alongside the given search_fields this filter filters also by:\n - keyword 'reorgs' --> return only blocks where reorgs happened\n - ['inputs', 'outputs', 'kernels'] ['=', '<', '>', '<=', '>='] [value] -->\n return only blocks matching this computation, eg: 'inputs > 2'\n You cannot combine different types of search (eg. 'reorgs' + 'computation')\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n queryset = super().filter_queryset(request, queryset, view)\n blockchain_slug = view.kwargs['blockchain_slug']\n original_search_terms = self.get_search_terms(request)\n search_terms = self._get_normalized_search_terms(original_search_terms)\n if len(search_terms) == 0:\n # searches:\n # - height --> add filter reorg=None\n # - hash --> nothing to add\n # - outputhash --> add filter reorg=None\n # - block-detail --> nothing to add\n # - block-list --> add filter reorg=None\n if len(original_search_terms) > 1:\n raise APIException('Too many standard search terms')\n if not original_search_terms:\n # it's either an unfiltered block-list or block-detail\n if view.action == 'list':\n queryset = queryset.filter(reorg=None)\n else:\n # there's only 1 original search term, figure out which one\n if len(original_search_terms[0]) != 64:\n # it's not block hash but either block height or output hash\n # in both cases we need to filter out reorgs\n queryset = queryset.filter(reorg=None)\n return queryset\n searched_types = set(map(lambda x: x['type'], search_terms))\n if len(searched_types) > 1:\n raise APIException('Cannot combine different types of searches')\n if searched_types == { 'reorgs' }:\n return self._get_reorgs_qs(blockchain_slug)\n elif searched_types == { 'computation' }:\n return self._get_computations_qs(search_terms, blockchain_slug)\n elif searched_types == { 'hash' }:\n return self._get_hash_qs(search_terms[0]['value'], blockchain_slug, queryset)\n elif searched_types == { 'height' }:\n return self._get_height_qs(search_terms[0]['value'], blockchain_slug)\n elif searched_types == { 'kernel_or_output' }:\n return self._get_kernel_or_output_qs(\n search_terms[0]['value'], blockchain_slug)\n else:\n logger.exception(\n 'Invalid search terms',\n exc_info=e,\n extra={'search_terms': search_terms}\n )\n raise APIException('Invalid search terms')\n\n def _get_normalized_search_terms(self, search_terms):\n \"\"\"\n Search terms of format ['outputs>1'] are not supported. Instead, the\n operators should be surrounded by spaces, eg. 
['outputs', '>', '1'].\n Supported operators are ['=', '>', '<', '<=', '>=']\n \"\"\"\n supported_operators = ['=', '>', '<', '<=', '>=']\n normalized_terms = []\n i = 0\n while i <= len(search_terms) - 1:\n if isinstance(search_terms[i], str) and search_terms[i].lower() in ['inputs', 'outputs', 'kernels']:\n operator = search_terms[i+1]\n if operator not in supported_operators:\n raise APIException('Invalid search operator')\n value = int(search_terms[i+2])\n if value < 0:\n raise APIException('Invalid search computation')\n normalized_terms.append({\n 'type': 'computation',\n 'source': search_terms[i],\n 'op': operator,\n 'value': value,\n })\n i += 3\n elif isinstance(search_terms[i], str) and search_terms[i].lower() == 'reorgs':\n normalized_terms.append({ 'type': 'reorgs' })\n i += 1\n elif len(search_terms[i]) == 64:\n # hash\n normalized_terms.append({\n 'type': 'hash',\n 'value': search_terms[i],\n })\n i += 1\n elif len(search_terms[i]) == 66:\n # kernel excess or output commitment\n normalized_terms.append({\n 'type': 'kernel_or_output',\n 'value': search_terms[i],\n })\n i += 1\n else:\n try:\n value = int(search_terms[i])\n except ValueError:\n value = None\n if value >= 0:\n normalized_terms.append({\n 'type': 'height',\n 'value': value,\n })\n i += 1\n else:\n # term which is not for this custom search, eg. block hash\n i += 1\n return normalized_terms\n\n def _get_reorgs_qs(self, blockchain_slug):\n # NOTE: we first filter, then calculate reorg_len on filtered data and\n # then filter on annotated data that we've calculated\n reorg_heights = list(Reorg.objects\\\n .select_related('start_main_block')\\\n .filter(\n blockchain__slug=blockchain_slug,\n start_main_block__reorg=None,\n )\\\n .annotate(reorg_len=F('end_reorg_block__height') - F('start_reorg_block__height') + 1)\\\n .filter(reorg_len__gte=settings.MIN_REORG_LEN)\\\n .values_list('start_main_block__height', flat=True)\n )\n queryset = Block.objects\\\n .filter(\n blockchain__slug=blockchain_slug,\n reorg=None,\n height__in=reorg_heights,\n )\\\n .order_by('-height')\n return queryset\n\n def _get_hash_qs(self, hash, blockchain_slug, queryset):\n return queryset.filter(\n blockchain__slug=blockchain_slug,\n hash=hash,\n )\n\n def _get_height_qs(self, height, blockchain_slug):\n return Block.objects.filter(\n blockchain__slug=blockchain_slug,\n height=height,\n )\n\n def _get_kernel_or_output_qs(self, kernel_or_output, blockchain_slug):\n kernel = Kernel.objects.filter(\n excess=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if kernel:\n return Block.objects.filter(hash=kernel.block.hash)\n output = Output.objects.filter(\n commitment=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if output:\n return Block.objects.filter(hash=output.block.hash)\n return Block.objects.none()\n\n def _get_computations_qs(self, search_terms, blockchain_slug):\n operator_mapping = {\n '=': '',\n '>': '__gt',\n '<': '__lt',\n '<=': '__lte',\n '>=': '__gte',\n }\n possible_sources = ['inputs', 'outputs', 'kernels']\n searched_sources = set(map(lambda x: x['source'], search_terms))\n op_searched_types = set(possible_sources) & set(searched_sources)\n op_qs = Blockchain.objects.get(slug=blockchain_slug).blocks.all()\n for search_term in search_terms:\n filters = {\n 'blockchain__slug': blockchain_slug,\n 'reorg': None,\n }\n op_map = operator_mapping[search_term['op']]\n filters[f'nr_{search_term[\"source\"]}{op_map}'] = search_term['value']\n op_qs = 
op_qs.filter(**filters).order_by('-height')\n return op_qs" }, { "identifier": "NodeFilter", "path": "backend/api/filters.py", "snippet": "class NodeFilter(filters.FilterSet):\n class Meta:\n model = Node\n fields = ('name', 'slug', 'archive')" }, { "identifier": "NodeGroupFilter", "path": "backend/api/filters.py", "snippet": "class NodeGroupFilter(filters.FilterSet):\n class Meta:\n model = NodeGroup\n fields = ('name', 'slug')" }, { "identifier": "CustomModelViewSet", "path": "backend/api/mixins.py", "snippet": "class CustomModelViewSet(\n DefaultMixin,\n viewsets.ModelViewSet\n):\n \"\"\"Default viewset for models.\"\"\"\n pass" }, { "identifier": "Blockchain", "path": "backend/api/models.py", "snippet": "class Blockchain(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # testnet, mainnet etc\n name = models.CharField(max_length=255, unique=True)\n # slug of the name, we use it in url\n slug = models.SlugField(max_length=255, unique=True)\n # node from which the data is fetched\n node = models.ForeignKey(\n Node, related_name='blockchains', on_delete=models.PROTECT)\n # the default blockchain will be picked on the gui by default\n default = models.BooleanField(default=False)\n # if fetch_price is False then the shown price will always be 0.\n # Testnets and localnets should have this set to false.\n fetch_price = models.BooleanField(default=True)\n # load_progress shows current % of loaded blocks. If archive is True then\n # load_progress will represent % of missing all blocks, otherwise % of\n # missing blocks from the latest 1440 blocks\n load_progress = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n default=0.0,\n validators=[MinValueValidator(0), MaxValueValidator(100)]\n )\n\n def __str__(self):\n return f'{self.name} - {self.load_progress} [Node<{self.node}>]'\n\n def bootstrap(self, skip_reorg_check=False):\n # import here to avoid cyclic import\n from .bootstrap import load_blocks\n\n start_height, end_height = self.get_bootstrap_heights()\n load_blocks(self, start_height, end_height, skip_reorg_check)\n\n def get_tip_height(self):\n node_api = NodeV2API(self.node)\n try:\n end_block = node_api.get_tip()['height']\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get node tip')\n raise e\n return end_block\n\n def get_progress_decimal_places(self):\n if self.node.archive:\n return 2\n return 0\n\n def get_bootstrap_heights(self):\n node_api = NodeV2API(self.node)\n end_height = self.get_tip_height()\n try:\n start_height = node_api.get_blocks(0, end_height, 1, False)['blocks'][0]['header']['height']\n except IndexError:\n raise Exception('Node has no blocks.')\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get first block height')\n raise e\n return start_height, end_height\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n if self.default:\n # set other blockchain.default to False\n other_blockchains = Blockchain.objects.all()\n if self.pk:\n other_blockchains = other_blockchains.exclude(pk=self.pk)\n other_blockchains.update(default=False)\n # blockchain doesn't change much so this call doesn't hurt\n old_instance = Blockchain.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.load_progress != old_instance.load_progress:\n # load progress changed, send info\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 
'blockchain_progress_changed',\n 'message': {\n 'slug': self.slug,\n # convert to float since Decimal is not serializable\n 'load_progress': float(self.load_progress),\n },\n }\n )\n return res\n\n def full_print(self):\n \"\"\"Used for developing and debugging.\"\"\"\n print('MAIN CHAIN:')\n for block in self.blocks.filter(reorg=None).order_by('height'):\n print(' --> ' + block.hash)\n for reorg in Reorg.objects.filter(blockchain=self):\n print('REORG:')\n for block in Block.objects.filter(reorg=reorg).order_by('height'):\n print(' --> ' + block.hash)\n print('------------------------------------------------------')\n\n def reset(self):\n \"\"\"Used for developing and debugging.\"\"\"\n from .models import Block, BlockHeader, Input, Output, Kernel, DramatiqTask, Reorg\n from django.contrib.contenttypes.models import ContentType\n from decimal import Decimal\n\n Input.objects.filter(block__blockchain=self).delete()\n Output.objects.filter(block__blockchain=self).delete()\n Kernel.objects.filter(block__blockchain=self).delete()\n self.reorgs.all().delete()\n\n content_type = ContentType.objects.get_for_model(self)\n DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=self.id,\n ).delete()\n # removing header will also remove the block\n BlockHeader.objects.filter(block__blockchain=self).delete()\n self.load_progress = Decimal('0')\n self.save()" }, { "identifier": "Block", "path": "backend/api/models.py", "snippet": "class Block(TimeStampedModel):\n blockchain = models.ForeignKey(\n Blockchain, related_name='blocks', on_delete=models.CASCADE)\n hash = models.CharField(\n primary_key=True,\n max_length=64,\n validators=[MinLengthValidator(64)],\n db_index=True,\n )\n height = models.PositiveIntegerField(db_index=True)\n timestamp = models.DateTimeField(db_index=True)\n header = models.ForeignKey(\n 'BlockHeader', related_name='block', on_delete=models.CASCADE)\n prev_hash = models.CharField(\n max_length=64,\n null=True,\n blank=True,\n validators=[MinLengthValidator(64)],\n )\n nr_inputs = models.PositiveIntegerField(default=0)\n nr_outputs = models.PositiveIntegerField(default=0)\n nr_kernels = models.PositiveIntegerField(default=0)\n # when reorg is set it means this block is part of a reorg and not the main\n # chain\n reorg = models.ForeignKey(\n 'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)\n\n def __str__(self):\n suffix = ''\n if self.reorg:\n suffix = ' Reorged: {}'.format(self.reorg.id)\n return '{}: {} (prev: {})'.format(\n self.height, self.hash, self.prev_hash)\n\n def get_next_block(self):\n return Block.objects.filter(prev_hash=self.hash).first()\n\n def get_previous_block(self):\n return Block.objects.filter(hash=self.prev_hash).first()\n\n def full_print(self, prefix=''):\n \"\"\"Used for developing and debugging.\"\"\"\n print('---------------------------------------------------------------')\n print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')\n print(f'{prefix} INPUTS:')\n for input in self.inputs.all():\n print(f'{prefix} {input}, output: {input.output}')\n print(f'{prefix} OUTPUTS:')\n for output in self.outputs.all():\n print(f'{prefix} {output}')\n print(f'{prefix} KERNELS:')\n for kernel in self.kernels.all():\n print(f'{prefix} {kernel}')\n print('---------------------------------------------------------------')" }, { "identifier": "Reorg", "path": "backend/api/models.py", "snippet": "class Reorg(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n blockchain = models.ForeignKey(\n 
Blockchain, related_name='reorgs', on_delete=models.CASCADE)\n # start_reorg_block and end_reorg_block define starting and ending block,\n # which were reorged\n start_reorg_block = models.ForeignKey(\n Block, related_name='start_reorgs', on_delete=models.CASCADE)\n end_reorg_block = models.ForeignKey(\n Block, related_name='end_reorgs', on_delete=models.CASCADE)\n # start_main_block defines starting block which is the new start of the main\n # chain - the block that replaced start_reorg_block. We usually don't know\n # which the ending block is when we spot the reorg, so we don't store it\n # (we don't even have it in DB at that time yet since we usually get them\n # incrementally in the order they're accepted).\n start_main_block = models.ForeignKey(\n Block, related_name='start_mains', on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}: start: {}, end: {}'.format(\n self.blockchain.slug, self.start_reorg_block, self.end_reorg_block)" }, { "identifier": "Node", "path": "backend/api/models.py", "snippet": "class Node(TimeStampedModel):\n \"\"\"Node on the network. Currently it only supports grin-rust.\"\"\"\n id = models.BigAutoField(primary_key=True)\n # name can be whatever\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n group = models.ForeignKey(\n NodeGroup, related_name='nodes', on_delete=models.PROTECT)\n # foreign api url of the grin-rust node\n api_url = models.URLField()\n # username of the grin-rust node\n api_username = models.CharField(max_length=255)\n # foreign api secret of the grin-rust node\n api_password = models.CharField(max_length=255)\n # if archive is true then we fetch every block when we bootstrap, otherwise\n # we fetch only latest 1440 blocks (1 day)\n archive = models.BooleanField(default=False)\n\n def __str__(self):\n repr = f'{self.name}'\n if self.archive:\n repr += ' (archive)'\n return repr\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n return super().save(*args, **kwargs)\n\n def is_reachable(self):\n try:\n NodeV2API(self).get_tip()\n return True\n except (\n RequestsConnectionError,\n RequestsTimeout,\n RequestsHTTPError,\n RequestsReadTimeout\n ):\n logger.exception('Node unreachable', extra={'node': self.slug})\n return False" }, { "identifier": "NodeGroup", "path": "backend/api/models.py", "snippet": "class NodeGroup(models.Model):\n \"\"\"\n NodeGroup represents a group of nodes. 
These nodes should be on the same\n network.:\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n # name is probably mainnet, testnet or smth similar\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n self.full_clean()\n return super().save(*args, **kwargs)" }, { "identifier": "DramatiqTask", "path": "backend/api/models.py", "snippet": "class DramatiqTask(TimeStampedModel):\n \"\"\"We store task's message_id so that we can abort the task.\"\"\"\n\n class Type(models.TextChoices):\n BOOTSTRAP = 'bootstrap', 'Bootstrap'\n BLOCKCHAIN_DELETE = 'blockchain_delete', 'Blockchain delete'\n\n class Status(models.TextChoices):\n # NOTE: IN_PROGRESS doesn't really mean it's already in progress, just\n # that it has been sent\n IN_PROGRESS = 'in_progress', 'In progress'\n SKIPPED = 'skipped', 'Skipped'\n SUCCESS = 'success', 'Success'\n FAILURE = 'failure', 'Failure'\n\n id = models.BigAutoField(primary_key=True)\n message_id = models.CharField(max_length=255, unique=True)\n # type tells us what this task is doing, eg. 'bootstrap'\n type = models.CharField(max_length=255, choices=Type.choices)\n status = models.CharField(max_length=255, choices=Status.choices)\n # failure_reason should be short and concise\n failure_reason = models.TextField(null=True, default=None)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n def save(self, *args, **kwargs):\n from .serializers import DramatiqTaskSerializer\n old_instance = DramatiqTask.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.status != old_instance.status:\n # status changed, send info\n print('sending task status update')\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'task_status_changed',\n 'message': DramatiqTaskSerializer(self).data,\n }\n )\n return res" }, { "identifier": "BlockchainSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockchainSerializer(serializers.ModelSerializer):\n node = serializers.PrimaryKeyRelatedField(queryset=Node.objects.all(), write_only=True)\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'default', 'node', 'load_progress', 'fetch_price')" }, { "identifier": "BlockchainExtendedSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockchainExtendedSerializer(serializers.ModelSerializer):\n tasks = serializers.SerializerMethodField()\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'node', 'default', 'load_progress', 'fetch_price', 'tasks')\n\n def to_representation(self, obj):\n self.fields['node'] = NodeSerializer()\n return super().to_representation(obj)\n\n def get_tasks(self, blockchain):\n content_type = ContentType.objects.get_for_model(blockchain)\n tasks = DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=blockchain.id,\n )\n return DramatiqTaskSimpleSerializer(tasks, many=True).data" }, { "identifier": "BlockSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockSerializer(serializers.ModelSerializer):\n blockchain = BlockchainSerializer()\n header = BlockHeaderSerializer()\n 
starting_reorg_blocks = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'reorg',\n 'nr_kernels',\n 'nr_inputs',\n 'nr_outputs',\n 'blockchain',\n 'starting_reorg_blocks',\n )\n\n def get_starting_reorg_blocks(self, block):\n reorgs = Reorg.objects.filter(start_main_block=block)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return BlockSerializer(\n [reorg.start_reorg_block for reorg in reorgs], many=True).data" }, { "identifier": "BlockDetailSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockDetailSerializer(serializers.ModelSerializer):\n header = BlockHeaderSerializer()\n kernels = KernelSerializer(many=True)\n inputs = InputSerializer(many=True)\n outputs = OutputSerializer(many=True)\n blockchain = BlockchainSerializer()\n confirmations = serializers.SerializerMethodField()\n next_hash = serializers.SerializerMethodField()\n next_block_reorgs = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'kernels',\n 'inputs',\n 'outputs',\n 'blockchain',\n 'confirmations',\n 'next_hash',\n 'reorg',\n 'next_block_reorgs',\n )\n\n def get_confirmations(self, block):\n # in reorged blocks we show confirmations based on the reorged chain!\n tip_height = block.blockchain.blocks\\\n .filter(reorg=block.reorg)\\\n .order_by('-height')\\\n .first().height\n return tip_height - block.height + 1\n\n def get_next_hash(self, block):\n try:\n return Block.objects.get(\n blockchain=block.blockchain,\n reorg=block.reorg,\n prev_hash=block.hash\n ).hash\n except Block.DoesNotExist:\n return None\n\n def get_next_block_reorgs(self, block):\n from .serializers import ReorgSerializer\n reorgs = Reorg.objects.filter(start_main_block__prev_hash=block.hash)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return ReorgSerializer(reorgs, many=True).data" }, { "identifier": "NodeSerializer", "path": "backend/api/serializers.py", "snippet": "class NodeSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Node\n fields = '__all__'" }, { "identifier": "NodeGroupSerializer", "path": "backend/api/serializers.py", "snippet": "class NodeGroupSerializer(serializers.ModelSerializer):\n nodes = NodeSerializer(many=True, read_only=True)\n\n class Meta:\n model = NodeGroup\n fields = '__all__'" }, { "identifier": "DramatiqTaskSerializer", "path": "backend/api/serializers.py", "snippet": "class DramatiqTaskSerializer(serializers.ModelSerializer):\n content_object = serializers.SerializerMethodField()\n\n class Meta:\n model = DramatiqTask\n fields = (\n 'id',\n 'message_id',\n 'type',\n 'status',\n 'failure_reason',\n 'content_object',\n )\n\n def get_content_object(self, task):\n from .serializers import BlockchainSerializer\n serializer_mapper = {\n 'Blockchain': BlockchainSerializer,\n }\n klass = task.content_object.__class__\n return {\n 'model': klass._meta.model_name,\n 'data': serializer_mapper[klass.__name__](task.content_object).data,\n }" }, { "identifier": "bootstrap_blockchain", "path": "backend/api/tasks.py", "snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef bootstrap_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import 
Blockchain\n Blockchain.objects.get(slug=blockchain_slug).bootstrap()" }, { "identifier": "delete_blockchain", "path": "backend/api/tasks.py", "snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef delete_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import Blockchain\n Blockchain.objects.get(slug=blockchain_slug).delete()\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'blockchain_deleted',\n 'message': {\n 'slug': blockchain_slug,\n },\n }\n )" } ]
from asgiref.sync import async_to_sync
from django.contrib.contenttypes.models import ContentType
from django.db.models.deletion import ProtectedError
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from dramatiq_abort import abort
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.exceptions import NotFound
from rest_framework.exceptions import ValidationError as DRFValidationError
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from slugify import slugify
from .bootstrap import fetch_and_store_block, update_blockchain_progress
from .exceptions import UpdateBlockchainProgressError
from .helpers import get_filter_backends, load_data_from_redis
from .filters import (
    BlockFilter,
    CustomBlockSearchFilter,
    NodeFilter,
    NodeGroupFilter,
)
from .mixins import CustomModelViewSet
from .models import Blockchain, Block, Reorg, Node, NodeGroup, DramatiqTask
from .serializers import (
    BlockchainSerializer,
    BlockchainExtendedSerializer,
    BlockSerializer,
    BlockDetailSerializer,
    NodeSerializer,
    NodeGroupSerializer,
    DramatiqTaskSerializer,
)
from .tasks import bootstrap_blockchain, delete_blockchain
import channels
import logging
import pytz
10,860
# call this view for each block it will get. In this case there will be # many fast sequential calls to this view, there might be too many # postgres connections opened so view executions might actually fail. # The suggested solution is to comment out 'block_accepted_url' in # node's config file, run the node, wait for it to sync, uncomment # 'block_accepted_url' and then manually bootstrap it. blockchain = self.get_object() # check if new block has been receiver when this blockchain is in the # process of being deleted. deleting = DramatiqTask.objects.filter( type=DramatiqTask.Type.BLOCKCHAIN_DELETE, object_id=blockchain.id, content_type=ContentType.objects.get_for_model(blockchain) ).exists() if deleting: # nothing to do, ignore the new block return Response(status=status.HTTP_404_NOT_FOUND) # get request data height = request.data['data']['header']['height'] hash = request.data['hash'] # prev_hash comes as list of int bytes, so we convert it to hex # NOTE: the same is true for some other data which we currently don't # need so we don't transform it, eg. data.header.kernel_root prev_hash = None if request.data['data']['header']['prev_hash']: prev_hash = bytes(request.data['data']['header']['prev_hash']).hex() logger.info( 'Block accepted', extra={ 'height': height, 'hash': hash, 'prev_hash': prev_hash, 'blockchain': blockchain.slug, }, ) web_socket_msg_type = 'send_block' # handle reorg case # we expect blocks to come ordered by height, there are some edge cases # here which are not handled, but they're unlikely to happen (eg. reorg # happens but websocket calls for first blocks fails while for later it # doesn't and then the code bellow wouldn't spot a reorg) block_at_this_height = blockchain.blocks\ .filter(height=height, reorg__isnull=True)\ .first() # we fetch here because anyone can call this view - we don't want to # work with fake data new_block = fetch_and_store_block(blockchain, height, prefetch=False) if block_at_this_height: if block_at_this_height.hash == new_block.hash: # probably have fetched this block while bootstraping, accepted # view got called a bit later so we already have it, noop return Response(status=status.HTTP_200_OK) logger.info( 'Block accepted - reorg spotted', extra={ 'block_at_this_height': block_at_this_height, 'block_at_this_height.hash': block_at_this_height.hash, 'block_at_this_height.reorg': block_at_this_height.reorg, 'hash': new_block.hash }, ) # reorg spotted reorged_blocks = list(blockchain.blocks\ .filter(height__gte=height, reorg__isnull=True) .exclude(pk=new_block.pk) .order_by('height')) logger.info('reorged_blocks at start: {}'.format(reorged_blocks)) # these reorged blocks are guaranteed to be reorged, now find any # previous blocks which were also reorged - aka get common # ancestor of the reorged block at 'height' and the new (main) block # find the common ancestor of this block and the reorged block at # the same height. 
We start with the current height to avoid more # logic for Reorg instance params if new_block.hash == block_at_this_height.hash: # at height X we got H1, then we got H2 (this call), but now it # reorged back to H1, so we don't do anything, no reorg is # stored since we didn't fetch the block in time from the node logger.info('Reorg cancelled out, noop') return Response(status=status.HTTP_200_OK) logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash}) prev_block_new_chain = new_block prev_block_old_chain = reorged_blocks[0] logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) # remove the first one since it will get added again reorged_blocks = reorged_blocks[1:] logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks)) main_blocks = [] while True: # theoretically we might be missing the block in db but we don't # cover such cases currently if not prev_block_new_chain: logger.info('reached break in IF NOT prev_block_new_chain') # this means that prev_block_old_chain is also None, since # they're both "previous" of their genesis block break if prev_block_new_chain == prev_block_old_chain: logger.info('reached break in IF NOT prev_block_new_chain == prev_block_old_chain') # found the common ancestor break # add to the left because we want to keep it sorted by height reorged_blocks.insert(0, prev_block_old_chain) main_blocks.insert(0, prev_block_new_chain) logger.info('new reorged_blocks: {}'.format(reorged_blocks)) logger.info('new main_blocks: {}'.format(main_blocks)) prev_block_new_chain = prev_block_new_chain.get_previous_block() prev_block_old_chain = prev_block_old_chain.get_previous_block() logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(reorged_blocks, main_blocks)) reorg = Reorg.objects.create( blockchain=blockchain, start_reorg_block=reorged_blocks[0], end_reorg_block=reorged_blocks[-1], start_main_block=main_blocks[0], ) # Reorg post_save signal fixes .reorg on new/old blocks and fixes # inputs/outputs web_socket_msg_type = 'reorged'
logger = logging.getLogger(__name__) # Serve Vue Application index_view = never_cache(TemplateView.as_view(template_name='index.html')) class NodeGroupViewSet(CustomModelViewSet): """API endpoint for NodeGroup.""" queryset = NodeGroup.objects.all() filterset_class = NodeGroupFilter serializer_class = NodeGroupSerializer lookup_field = 'slug' permission_classes = [IsAuthenticated] def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) return super().create(request, *args, **kwargs) def destroy(self, request, *args, **kwargs): try: return super().destroy(request, *args, **kwargs) except ProtectedError as e: raise DRFValidationError( detail='Node group is related to nodes, delete them first') class NodeViewSet(CustomModelViewSet): """API endpoint for Node.""" queryset = Node.objects.all() filterset_class = NodeFilter serializer_class = NodeSerializer # currently all node views require authentication permission_classes = [IsAuthenticated] lookup_field = 'slug' def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk return super().create(request, *args, **kwargs) def update(self, request, *args, **kwargs): # NOTE: super().partial_update calls update(..., partial=True) if not kwargs.get('partial'): # we don't allow full updates - aka PUT raise DRFPermissionDenied() return super().update(request, *args, **kwargs) def partial_update(self, request, slug=None): request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk return super().partial_update(request, slug=slug) @action(detail=True, methods=['get']) def reachable(self, request, slug=None): node = self.get_object() try: res = node.is_reachable() except Exception as e: logger.exception('Unreachable node') res = False return Response(res, status=status.HTTP_200_OK) def destroy(self, request, *args, **kwargs): try: return super().destroy(request, *args, **kwargs) except ProtectedError as e: raise DRFValidationError( detail='Node is related to blockchains, delete them first') class BlockchainViewSet(CustomModelViewSet): """API endpoint for Blockchain.""" queryset = Blockchain.objects.all() serializer_class = BlockchainSerializer lookup_field = 'slug' def get_serializer_class(self): # when authenticated we return also NodeSerializer data if self.request.user.is_authenticated: return BlockchainExtendedSerializer return BlockchainSerializer def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) request.data['node'] = request.data['node'] return super().create(request, *args, **kwargs) def destroy(self, request, slug=None): instance = self.get_object() message = delete_blockchain.send(instance.slug) task = DramatiqTask.objects.create( type=DramatiqTask.Type.BLOCKCHAIN_DELETE, status=DramatiqTask.Status.IN_PROGRESS, message_id=message.message_id, content_object=instance, ) return Response( DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK) def _abort_previous_tasks(self, blockchain): conflicting_message_ids = DramatiqTask.objects.filter( status=DramatiqTask.Status.IN_PROGRESS, object_id=blockchain.id, content_type=ContentType.objects.get_for_model(blockchain) ).values_list('message_id', flat=True) # abort previous conflicting tasks if they exist for 
conflicting_message_id in conflicting_message_ids: abort(conflicting_message_id) @action(detail=True, methods=['post']) def bootstrap(self, request, slug=None): blockchain = self.get_object() if not blockchain.node.is_reachable: raise APIException(detail='Node is unreachable') self._abort_previous_tasks(blockchain) # create a new task message = bootstrap_blockchain.send(blockchain.slug) task = DramatiqTask.objects.create( type=DramatiqTask.Type.BOOTSTRAP, status=DramatiqTask.Status.IN_PROGRESS, message_id=message.message_id, content_object=blockchain, ) return Response( DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK) @action( detail=True, methods=['post'], url_path='bootstrap/abort', url_name='bootstrap-abort', ) def abort_bootstrap(self, request, slug=None): blockchain = self.get_object() self._abort_previous_tasks(blockchain) return Response(status=status.HTTP_200_OK) @action(detail=True, methods=['get']) def graphs(self, request, slug=None): """Returns data for all graphs.""" data = { 'transaction_graph': load_data_from_redis(f'tx_graph__{slug}'), } return Response(data=data, status=status.HTTP_200_OK) @action(detail=True, methods=['post']) def accepted(self, request, slug=None): # NOTE: if node is offline and then you start it again then it will # call this view for each block it will get. In this case there will be # many fast sequential calls to this view, there might be too many # postgres connections opened so view executions might actually fail. # The suggested solution is to comment out 'block_accepted_url' in # node's config file, run the node, wait for it to sync, uncomment # 'block_accepted_url' and then manually bootstrap it. blockchain = self.get_object() # check if new block has been receiver when this blockchain is in the # process of being deleted. deleting = DramatiqTask.objects.filter( type=DramatiqTask.Type.BLOCKCHAIN_DELETE, object_id=blockchain.id, content_type=ContentType.objects.get_for_model(blockchain) ).exists() if deleting: # nothing to do, ignore the new block return Response(status=status.HTTP_404_NOT_FOUND) # get request data height = request.data['data']['header']['height'] hash = request.data['hash'] # prev_hash comes as list of int bytes, so we convert it to hex # NOTE: the same is true for some other data which we currently don't # need so we don't transform it, eg. data.header.kernel_root prev_hash = None if request.data['data']['header']['prev_hash']: prev_hash = bytes(request.data['data']['header']['prev_hash']).hex() logger.info( 'Block accepted', extra={ 'height': height, 'hash': hash, 'prev_hash': prev_hash, 'blockchain': blockchain.slug, }, ) web_socket_msg_type = 'send_block' # handle reorg case # we expect blocks to come ordered by height, there are some edge cases # here which are not handled, but they're unlikely to happen (eg. 
reorg # happens but websocket calls for first blocks fails while for later it # doesn't and then the code bellow wouldn't spot a reorg) block_at_this_height = blockchain.blocks\ .filter(height=height, reorg__isnull=True)\ .first() # we fetch here because anyone can call this view - we don't want to # work with fake data new_block = fetch_and_store_block(blockchain, height, prefetch=False) if block_at_this_height: if block_at_this_height.hash == new_block.hash: # probably have fetched this block while bootstraping, accepted # view got called a bit later so we already have it, noop return Response(status=status.HTTP_200_OK) logger.info( 'Block accepted - reorg spotted', extra={ 'block_at_this_height': block_at_this_height, 'block_at_this_height.hash': block_at_this_height.hash, 'block_at_this_height.reorg': block_at_this_height.reorg, 'hash': new_block.hash }, ) # reorg spotted reorged_blocks = list(blockchain.blocks\ .filter(height__gte=height, reorg__isnull=True) .exclude(pk=new_block.pk) .order_by('height')) logger.info('reorged_blocks at start: {}'.format(reorged_blocks)) # these reorged blocks are guaranteed to be reorged, now find any # previous blocks which were also reorged - aka get common # ancestor of the reorged block at 'height' and the new (main) block # find the common ancestor of this block and the reorged block at # the same height. We start with the current height to avoid more # logic for Reorg instance params if new_block.hash == block_at_this_height.hash: # at height X we got H1, then we got H2 (this call), but now it # reorged back to H1, so we don't do anything, no reorg is # stored since we didn't fetch the block in time from the node logger.info('Reorg cancelled out, noop') return Response(status=status.HTTP_200_OK) logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash}) prev_block_new_chain = new_block prev_block_old_chain = reorged_blocks[0] logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) # remove the first one since it will get added again reorged_blocks = reorged_blocks[1:] logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks)) main_blocks = [] while True: # theoretically we might be missing the block in db but we don't # cover such cases currently if not prev_block_new_chain: logger.info('reached break in IF NOT prev_block_new_chain') # this means that prev_block_old_chain is also None, since # they're both "previous" of their genesis block break if prev_block_new_chain == prev_block_old_chain: logger.info('reached break in IF NOT prev_block_new_chain == prev_block_old_chain') # found the common ancestor break # add to the left because we want to keep it sorted by height reorged_blocks.insert(0, prev_block_old_chain) main_blocks.insert(0, prev_block_new_chain) logger.info('new reorged_blocks: {}'.format(reorged_blocks)) logger.info('new main_blocks: {}'.format(main_blocks)) prev_block_new_chain = prev_block_new_chain.get_previous_block() prev_block_old_chain = prev_block_old_chain.get_previous_block() logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(reorged_blocks, main_blocks)) reorg = Reorg.objects.create( blockchain=blockchain, start_reorg_block=reorged_blocks[0], end_reorg_block=reorged_blocks[-1], start_main_block=main_blocks[0], ) # Reorg post_save signal fixes .reorg on new/old blocks and 
fixes # inputs/outputs web_socket_msg_type = 'reorged'
web_socket_msg = BlockSerializer(new_block).data
18
2023-12-24 22:15:11+00:00
16k
wuhy68/Parameter-Efficient-MoE
train_moe.py
[ { "identifier": "CamelidaeConfig", "path": "camelidae/configuration_camelidae.py", "snippet": "class CamelidaeConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the LLaMA-7B.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`LlamaModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 11008):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details checkout [this\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\n `num_attention_heads`.\n pretraining_tp (`int`, *optional*, defaults to `1`):\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\n issue](https://github.com/pytorch/pytorch/issues/76232).\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. 
The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an\n experimental feature, subject to breaking API changes in future versions.\n num_experts (`int`, *optional*, defaults to 8):\n The number of MoE expert\n Example:\n\n ```python\n >>> from transformers import CamelidaeModel, CamelidaeConfig\n\n >>> # Initializing a Camelidae camelidae-7b style configuration\n >>> configuration = CamelidaeConfig()\n\n >>> # Initializing a model from the camelidae-7b style configuration\n >>> model = CamelidaeModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"llama\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_scaling=None,\n moe_dtype=\"bfloat16\",\n moe_scaling=0.25,\n num_experts=8,\n topk=1,\n output_router_logits=True,\n adapter_dim=64,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n\n # for backward compatibility\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.pretraining_tp = pretraining_tp\n self.use_cache = use_cache\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n self.moe_dtype = moe_dtype\n self.moe_scaling = moe_scaling\n self.num_experts = num_experts\n self.topk = topk\n self.output_router_logits = output_router_logits\n\n self.adapter_dim = adapter_dim\n\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )\n\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if (\n rope_scaling_factor is None\n or not isinstance(rope_scaling_factor, float)\n or rope_scaling_factor <= 1.0\n ):\n raise ValueError(\n f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\"\n )" }, { "identifier": 
"LlamaForCausalLM", "path": "camelidae/modeling_camelidae.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n )\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_router_logits: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MoECausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n output_router_logits = (\n output_router_logits if output_router_logits is not None else self.config.output_router_logits\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n output_router_logits=output_router_logits,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(\n self.vocab_size // self.config.pretraining_tp, dim=0\n )\n logits = [\n F.linear(hidden_states, lm_head_slices[i])\n for i in range(self.config.pretraining_tp)\n ]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n\n loss = None\n\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n aux_loss = None\n if output_router_logits:\n aux_loss = load_balancing_loss_func(\n outputs.router_logits if return_dict else outputs[-1], self.config.num_experts, self.config.topk\n )\n if labels is not None:\n loss += 0.01 * aux_loss\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n if output_router_logits:\n output = (aux_loss,) + output\n return (loss,) + output if loss is not None else output\n\n return MoECausalLMOutputWithPast(\n loss=loss,\n aux_loss=aux_loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n router_logits=outputs.router_logits,\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs,\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def 
_reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(\n past_state.index_select(0, beam_idx.to(past_state.device))\n for past_state in layer_past\n ),\n )\n return reordered_past" }, { "identifier": "get_keys_to_not_convert", "path": "transformers_utils.py", "snippet": "def get_keys_to_not_convert(model):\n r\"\"\"\n An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules\n we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want\n to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in\n int8.\n\n Parameters:\n model (`torch.nn.Module`):\n Input model\n \"\"\"\n # Create a copy of the model and tie the weights, then\n # check if it contains tied weights\n tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager`\n tied_model.tie_weights()\n\n tied_params = find_tied_parameters(tied_model)\n # For compatibility with Accelerate < 0.18\n if isinstance(tied_params, dict):\n tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())\n else:\n tied_keys = sum(tied_params, [])\n has_tied_params = len(tied_keys) > 0\n\n # Check if it is a base model\n is_base_model = not hasattr(model, model.base_model_prefix)\n\n # Ignore this for base models (BertModel, GPT2Model, etc.)\n if (not has_tied_params) and is_base_model:\n return []\n\n adapter_module = []\n for n, p in model.named_parameters():\n if 'adapter' in n:\n adapter_module.append(n)\n\n # otherwise they have an attached head\n list_modules = list(model.named_parameters())\n list_last_module = [list_modules[-1][0]]\n\n # add last module together with tied weights\n intersection = set(list_last_module) - set(tied_keys)\n list_untouched = list(set(tied_keys)) + list(intersection) + adapter_module\n\n # remove \".weight\" from the keys\n names_to_remove = [\".weight\", \".bias\"]\n filtered_module_names = []\n for name in list_untouched:\n for name_to_remove in names_to_remove:\n if name_to_remove in name:\n name = name.replace(name_to_remove, \"\")\n filtered_module_names.append(name)\n \n # print(filtered_module_names)\n return filtered_module_names" }, { "identifier": "_load_pretrained_model", "path": "transformers_utils.py", "snippet": "@classmethod\ndef _load_pretrained_model(\n cls,\n model,\n state_dict,\n loaded_keys,\n resolved_archive_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=False,\n sharded_metadata=None,\n _fast_init=True,\n low_cpu_mem_usage=False,\n device_map=None,\n offload_folder=None,\n offload_state_dict=None,\n dtype=None,\n is_quantized=False,\n keep_in_fp32_modules=None,\n ):\n is_safetensors = False\n if is_quantized:\n from transformers.utils.bitsandbytes import set_module_quantized_tensor_to_device\n\n if device_map is not None and \"disk\" in device_map.values():\n archive_file = (\n resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file\n )\n is_safetensors = archive_file.endswith(\".safetensors\")\n if offload_folder is None and not is_safetensors:\n raise ValueError(\n \"The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder`\"\n \" for them. 
Alternatively, make sure you have `safetensors` installed if the model you are using\"\n \" offers the weights in this format.\"\n )\n if offload_folder is not None:\n os.makedirs(offload_folder, exist_ok=True)\n if offload_state_dict is None:\n offload_state_dict = True\n\n is_sharded_safetensors = is_safetensors and sharded_metadata is not None\n # Retrieve missing & unexpected_keys\n model_state_dict = model.state_dict()\n expected_keys = list(model_state_dict.keys())\n prefix = model.base_model_prefix\n\n def _fix_key(key):\n if \"beta\" in key:\n return key.replace(\"beta\", \"bias\")\n if \"gamma\" in key:\n return key.replace(\"gamma\", \"weight\")\n return key\n\n original_loaded_keys = loaded_keys\n loaded_keys = [_fix_key(key) for key in loaded_keys]\n\n if len(prefix) > 0:\n has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)\n expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)\n else:\n has_prefix_module = False\n expects_prefix_module = False\n\n # key re-naming operations are never done on the keys\n # that are loaded, but always on the keys of the newly initialized model\n remove_prefix_from_model = not has_prefix_module and expects_prefix_module\n add_prefix_to_model = has_prefix_module and not expects_prefix_module\n\n if remove_prefix_from_model:\n _prefix = f\"{prefix}.\"\n expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)]\n expected_keys = [s[len(_prefix) :] if s.startswith(_prefix) else s for s in expected_keys]\n elif add_prefix_to_model:\n expected_keys = [\".\".join([prefix, s]) for s in expected_keys]\n\n missing_keys = list(set(expected_keys) - set(loaded_keys))\n unexpected_keys = set(loaded_keys) - set(expected_keys)\n # Remove nonpersistent buffers from unexpected keys: they are not in the state dict but will be in the model\n # buffers\n model_buffers = {n for n, _ in model.named_buffers()}\n if remove_prefix_from_model:\n model_buffers = {key[len(_prefix) :] if key.startswith(_prefix) else key for key in model_buffers}\n elif add_prefix_to_model:\n model_buffers = {\".\".join([prefix, key]) for key in model_buffers}\n unexpected_keys = list(unexpected_keys - model_buffers)\n\n model.tie_weights()\n ptrs = collections.defaultdict(list)\n for name, tensor in model.state_dict().items():\n id_tensor = id_tensor_storage(tensor) if tensor.device != torch.device(\"meta\") else id(tensor)\n ptrs[id_tensor].append(name)\n\n # These are all the pointers of shared tensors.\n tied_params = [names for _, names in ptrs.items() if len(names) > 1]\n\n for group in tied_params:\n if remove_prefix_from_model:\n group = [key[len(_prefix) :] if key.startswith(_prefix) else key for key in group]\n elif add_prefix_to_model:\n group = [\".\".join([prefix, key]) for key in group]\n missing_in_group = [k for k in missing_keys if k in group]\n if len(missing_in_group) > 0 and len(missing_in_group) < len(group):\n missing_keys = [k for k in missing_keys if k not in missing_in_group]\n\n # Some models may have keys that are not in the state by design, removing them before needlessly warning\n # the user.\n if cls._keys_to_ignore_on_load_missing is not None:\n for pat in cls._keys_to_ignore_on_load_missing:\n missing_keys = [k for k in missing_keys if re.search(pat, k) is None]\n\n if cls._keys_to_ignore_on_load_unexpected is not None:\n for pat in cls._keys_to_ignore_on_load_unexpected:\n unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]\n\n # retrieve weights on meta device and put them 
back on CPU.\n # This is not ideal in terms of memory, but if we don't do that not, we can't initialize them in the next step\n if low_cpu_mem_usage:\n for key in missing_keys:\n if key in list(model_state_dict.keys()):\n key = key\n elif f\"{prefix}.{key}\" in list(model_state_dict.keys()):\n key = f\"{prefix}.{key}\"\n elif key.startswith(prefix) and \".\".join(key.split(\".\")[1:]) in list(model_state_dict.keys()):\n key = \".\".join(key.split(\".\")[1:])\n param = model_state_dict[key]\n\n # upcast in fp32 if any\n target_dtype = dtype\n if (\n keep_in_fp32_modules is not None\n and dtype == torch.float16\n and any(module_to_keep_in_fp32 in key for module_to_keep_in_fp32 in keep_in_fp32_modules)\n ):\n target_dtype = torch.float32\n\n if param.device == torch.device(\"meta\"):\n if not (is_quantized):\n set_module_tensor_to_device(model, key, \"cpu\", torch.empty(*param.size(), dtype=target_dtype))\n else:\n set_module_quantized_tensor_to_device(\n model, key, \"cpu\", torch.empty(*param.size(), dtype=target_dtype)\n )\n\n # retrieve unintialized modules and initialize before maybe overriding that with the pretrained weights.\n if _fast_init:\n if remove_prefix_from_model:\n _loaded_keys = [f\"{prefix}.{k}\" for k in loaded_keys]\n elif add_prefix_to_model:\n _loaded_keys = [k[len(prefix) + 1 :] for k in loaded_keys]\n else:\n _loaded_keys = loaded_keys\n set_initialized_submodules(model, _loaded_keys)\n # This will only initialize submodules that are not marked as initialized by the line above.\n model.apply(model._initialize_weights)\n\n # Set some modules to fp32 if any\n if keep_in_fp32_modules is not None:\n for name, param in model.named_parameters():\n if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):\n param = param.to(torch.float32)\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module:\n start_prefix = cls.base_model_prefix + \".\"\n if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module:\n model_to_load = getattr(model, cls.base_model_prefix)\n base_model_expected_keys = list(model_to_load.state_dict().keys())\n if any(key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys):\n raise ValueError(\n \"The state dictionary of the model you are trying to load is corrupted. 
Are you sure it was \"\n \"properly saved?\"\n )\n if device_map is not None:\n device_map = {k.replace(f\"{cls.base_model_prefix}.\", \"\"): v for k, v in device_map.items()}\n\n def _find_mismatched_keys(\n state_dict,\n model_state_dict,\n loaded_keys,\n add_prefix_to_model,\n remove_prefix_from_model,\n ignore_mismatched_sizes,\n ):\n mismatched_keys = []\n if ignore_mismatched_sizes:\n for checkpoint_key in loaded_keys:\n # If the checkpoint is sharded, we may not have the key here.\n if checkpoint_key not in state_dict:\n continue\n model_key = checkpoint_key\n if remove_prefix_from_model:\n # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it.\n model_key = f\"{prefix}.{checkpoint_key}\"\n elif add_prefix_to_model:\n # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it.\n model_key = \".\".join(checkpoint_key.split(\".\")[1:])\n\n if (\n model_key in model_state_dict\n and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape\n ):\n mismatched_keys.append(\n (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)\n )\n del state_dict[checkpoint_key]\n\n return mismatched_keys\n\n if resolved_archive_file is not None:\n folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1])\n else:\n folder = None\n if device_map is not None and is_safetensors:\n param_device_map = expand_device_map(device_map, original_loaded_keys)\n\n str_dtype = str(dtype).replace(\"torch.\", \"\") if dtype is not None else \"float32\"\n if sharded_metadata is None:\n archive_file = (\n resolved_archive_file[0]\n if isinstance(resolved_archive_file, (list, tuple))\n else resolved_archive_file\n )\n weight_map = {p: archive_file for p in original_loaded_keys}\n else:\n weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata[\"weight_map\"].items()}\n offload_index = {\n p: {\"safetensors_file\": f, \"weight_name\": p, \"dtype\": str_dtype}\n for p, f in weight_map.items()\n if param_device_map[p] == \"disk\"\n }\n\n if state_dict is not None:\n # Whole checkpoint\n mismatched_keys = _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n add_prefix_to_model,\n remove_prefix_from_model,\n ignore_mismatched_sizes,\n )\n error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix)\n offload_index = None\n else:\n # Sharded checkpoint or whole but low_cpu_mem_usage==True\n\n # This should always be a list but, just to be sure.\n if not isinstance(resolved_archive_file, list):\n resolved_archive_file = [resolved_archive_file]\n\n error_msgs = []\n mismatched_keys = []\n if not is_safetensors:\n offload_index = {} if device_map is not None and \"disk\" in device_map.values() else None\n if offload_state_dict:\n state_dict_folder = tempfile.mkdtemp()\n state_dict_index = {}\n else:\n state_dict_folder = None\n state_dict_index = None\n\n if is_sharded_safetensors:\n disk_only_shard_files = get_disk_only_shard_files(device_map, sharded_metadata=sharded_metadata)\n disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files]\n else:\n disk_only_shard_files = []\n\n if len(resolved_archive_file) > 1:\n resolved_archive_file = logging.tqdm(resolved_archive_file, desc=\"Loading checkpoint shards\")\n for shard_file in resolved_archive_file:\n # Skip the load for shards that only contain disk-offloaded weights when using safetensors for the offload.\n if shard_file in disk_only_shard_files:\n continue\n state_dict = 
load_state_dict(shard_file)\n\n # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not\n # matching the weights in the model.\n mismatched_keys += _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n add_prefix_to_model,\n remove_prefix_from_model,\n ignore_mismatched_sizes,\n )\n\n if low_cpu_mem_usage:\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n model_to_load,\n state_dict,\n loaded_keys,\n start_prefix,\n expected_keys,\n device_map=device_map,\n offload_folder=offload_folder,\n offload_index=offload_index,\n state_dict_folder=state_dict_folder,\n state_dict_index=state_dict_index,\n dtype=dtype,\n is_quantized=is_quantized,\n is_safetensors=is_safetensors,\n keep_in_fp32_modules=keep_in_fp32_modules,\n )\n error_msgs += new_error_msgs\n else:\n error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix)\n\n # force memory release\n del state_dict\n gc.collect()\n\n if offload_index is not None and len(offload_index) > 0:\n if model != model_to_load:\n # We need to add the prefix of the base model\n prefix = cls.base_model_prefix\n if not is_safetensors:\n for weight_name in offload_index:\n shutil.move(\n os.path.join(offload_folder, f\"{weight_name}.dat\"),\n os.path.join(offload_folder, f\"{prefix}.{weight_name}.dat\"),\n )\n offload_index = {f\"{prefix}.{key}\": value for key, value in offload_index.items()}\n if not is_safetensors:\n save_offload_index(offload_index, offload_folder)\n offload_index = None\n\n if offload_state_dict:\n # Load back temporarily offloaded state dict\n load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder)\n shutil.rmtree(state_dict_folder)\n\n if len(error_msgs) > 0:\n error_msg = \"\\n\\t\".join(error_msgs)\n if \"size mismatch\" in error_msg:\n error_msg += (\n \"\\n\\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.\"\n )\n raise RuntimeError(f\"Error(s) in loading state_dict for {model.__class__.__name__}:\\n\\t{error_msg}\")\n\n if is_quantized:\n unexpected_keys = [elem for elem in unexpected_keys if \"SCB\" not in elem]\n missing_keys = [elem for elem in missing_keys if \"SCB\" not in elem]\n\n missing_keys = list(filter(lambda x: 'adapter' not in x, missing_keys))\n\n if len(unexpected_keys) > 0:\n archs = [] if model.config.architectures is None else model.config.architectures\n warner = logger.warn if model.__class__.__name__ in archs else logger.info\n warner(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or\"\n \" with another architecture (e.g. 
initializing a BertForSequenceClassification model from a\"\n \" BertForPreTraining model).\\n- This IS NOT expected if you are initializing\"\n f\" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical\"\n \" (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n elif len(mismatched_keys) == 0:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the checkpoint\"\n f\" was trained on, you can already use {model.__class__.__name__} for predictions without further\"\n \" training.\"\n )\n if len(mismatched_keys) > 0:\n mismatched_warning = \"\\n\".join(\n [\n f\"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated\"\n for key, shape1, shape2 in mismatched_keys\n ]\n )\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized because the shapes did not\"\n f\" match:\\n{mismatched_warning}\\nYou should probably TRAIN this model on a down-stream task to be able\"\n \" to use it for predictions and inference.\"\n )\n\n return model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs" } ]
import os
import gc
import json
import math
import random
import copy
import logging
import torch
import utils
import bitsandbytes as bnb
import transformers
import warnings
import transformers.integrations
import transformers.modeling_utils
from os.path import exists, join, isdir
from copy import deepcopy
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence, Callable, List, Tuple, Union, Any
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer, BitsAndBytesConfig, set_seed
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from peft.tuners.lora import LoraLayer
from camelidae.configuration_camelidae import CamelidaeConfig
from camelidae.modeling_camelidae import LlamaForCausalLM
from transformers_utils import (
    get_keys_to_not_convert,
    _load_pretrained_model,
)
11,041
@dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: input_ids, labels = tuple( [instance[key] for instance in instances] for key in ("input_ids", "labels") ) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id ) labels = torch.nn.utils.rnn.pad_sequence( labels, batch_first=True, padding_value=IGNORE_INDEX ) return dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ) class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): # print('Saving PEFT checkpoint...') if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join( state.best_model_checkpoint, "adapter_model" ) else: checkpoint_folder = os.path.join( args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}" ) peft_model_path = os.path.join(checkpoint_folder, "adapter_model") model = kwargs["model"] model.save_pretrained(peft_model_path) moe_state = {} for param_tensor in model.state_dict(): if "adapter" in param_tensor: moe_state.update({param_tensor: model.state_dict()[param_tensor]}) # if "adapter" in param_tensor or "norm" in param_tensor: # moe_state.update({param_tensor: model.state_dict()[param_tensor]}) moe_model_path = os.path.join(checkpoint_folder, "moe_model.bin") # print(moe_state.keys()) torch.save(moe_state, moe_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): def touch(fname, times=None): with open(fname, "a"): os.utime(fname, times) touch(join(args.output_dir, "completed")) self.save_model(args, state, kwargs) def make_supervised_data_module( tokenizer: transformers.PreTrainedTokenizer, data_args ) -> Dict: """Make dataset and collator for supervised fine-tuning.""" train_dataset = SupervisedDataset( tokenizer=tokenizer, data_path=data_args.data_path ) data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer) return dict( train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator ) def find_all_linear_names(model, bits=4): cls = ( bnb.nn.Linear4bit if bits == 4 else (bnb.nn.Linear8bitLt if bits == 8 else torch.nn.Linear) ) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split(".") lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if "lm_head" in lora_module_names: # needed for 16-bit lora_module_names.remove("lm_head") return list(lora_module_names) def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def train(): parser = transformers.HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments) ) model_args, data_args, training_args = parser.parse_args_into_dataclasses() training_args.ddp_find_unused_parameters = False set_seed(42)
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. warnings.filterwarnings("ignore") transformers.integrations.get_keys_to_not_convert = get_keys_to_not_convert transformers.modeling_utils.PreTrainedModel._load_pretrained_model = ( _load_pretrained_model ) IGNORE_INDEX = -100 DEFAULT_PAD_TOKEN = "[PAD]" @dataclass class ModelArguments: model_name_or_path: Optional[str] = field(default="facebook/opt-125m") @dataclass class DataArguments: data_path: str = field( default=None, metadata={"help": "Path to the training data."} ) @dataclass class TrainingArguments(transformers.TrainingArguments): report_to: str = field(default="none") cache_dir: Optional[str] = field(default=None) optim: str = field( default="paged_adamw_32bit" ) # "paged_lion_8bit", "paged_adamw_8bit", "paged_lion_32bit", "paged_adamw_32bit" lr_scheduler_type: str = field( default="constant_with_warmup" ) # "constant", "constant_with_warmup", "cosine", "cosine_with_restarts", "linear" model_max_length: int = field( default=2048, metadata={ "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)." }, ) def _tokenize_fn( strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer ) -> Dict: """Tokenize a list of strings.""" tokenized_list = [ tokenizer( text, return_tensors="pt", padding="longest", max_length=tokenizer.model_max_length, truncation=True, ) for text in strings ] input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list] input_ids_lens = labels_lens = [ tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list ] return dict( input_ids=input_ids, labels=labels, input_ids_lens=input_ids_lens, labels_lens=labels_lens, ) def preprocess( sources: Sequence[str], targets: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, ) -> Dict: """Preprocess the data by tokenizing.""" examples = [s + t for s, t in zip(sources, targets)] examples_tokenized, sources_tokenized = [ _tokenize_fn(strings, tokenizer) for strings in (examples, sources) ] input_ids = examples_tokenized["input_ids"] labels = copy.deepcopy(input_ids) for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]): label[:source_len] = IGNORE_INDEX return dict(input_ids=input_ids, labels=labels) class SupervisedDataset(Dataset): """Dataset for supervised fine-tuning.""" def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer): super(SupervisedDataset, self).__init__() logging.warning("Loading data: {}".format(data_path)) data_list = utils.jload(data_path) # Preprocess Data logging.warning("Processing data") self.tokenizer = tokenizer self.sources = [] self.targets = [] for idx in range(len(data_list)): data = data_list[idx] corpus = data["corpus"] if corpus != "": # pretrain mode source = f"{tokenizer.bos_token}" self.sources.append(source) target = f"{corpus}{tokenizer.eos_token}" self.targets.append(target) else: # instruction mode instruction = 
data["instruction"] conversation = data["conversation"] if len(conversation) == 1: if instruction == "": source = f"{tokenizer.bos_token}" else: source = f"{tokenizer.bos_token}### System:\n{instruction}\n" source += ( f"### Human:\n{conversation[0]['input']}\n### Assistant:\n" ) self.sources.append(source) target = f"{conversation[0]['output']}{tokenizer.eos_token}" self.targets.append(target) # else: # dialog mode del data_list gc.collect() # ## Debug Mode # self.sources = self.sources[:10000] # self.targets = self.targets[:10000] # logging.warning("Tokenizing inputs... This may take some time...") # data_dict = preprocess(sources, targets, tokenizer) # del sources, targets # gc.collect() # self.input_ids = data_dict["input_ids"] # self.labels = data_dict["labels"] # del data_dict # gc.collect() logging.warning("there are {} data in dataset".format(len(self.sources))) def __len__(self): return len(self.sources) def __getitem__(self, i): # return dict(input_ids=self.input_ids[i], labels=self.labels[i]) source = [self.sources[i]] target = [self.targets[i]] data_dict = preprocess(source, target, self.tokenizer) input_ids = data_dict["input_ids"][0] labels = data_dict["labels"][0] return dict(input_ids=input_ids, labels=labels) @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: input_ids, labels = tuple( [instance[key] for instance in instances] for key in ("input_ids", "labels") ) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id ) labels = torch.nn.utils.rnn.pad_sequence( labels, batch_first=True, padding_value=IGNORE_INDEX ) return dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ) class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): # print('Saving PEFT checkpoint...') if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join( state.best_model_checkpoint, "adapter_model" ) else: checkpoint_folder = os.path.join( args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}" ) peft_model_path = os.path.join(checkpoint_folder, "adapter_model") model = kwargs["model"] model.save_pretrained(peft_model_path) moe_state = {} for param_tensor in model.state_dict(): if "adapter" in param_tensor: moe_state.update({param_tensor: model.state_dict()[param_tensor]}) # if "adapter" in param_tensor or "norm" in param_tensor: # moe_state.update({param_tensor: model.state_dict()[param_tensor]}) moe_model_path = os.path.join(checkpoint_folder, "moe_model.bin") # print(moe_state.keys()) torch.save(moe_state, moe_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): def touch(fname, times=None): with open(fname, "a"): os.utime(fname, times) touch(join(args.output_dir, "completed")) self.save_model(args, state, kwargs) def make_supervised_data_module( tokenizer: transformers.PreTrainedTokenizer, data_args ) -> Dict: """Make dataset and collator for supervised fine-tuning.""" train_dataset = SupervisedDataset( tokenizer=tokenizer, data_path=data_args.data_path ) data_collator = 
DataCollatorForSupervisedDataset(tokenizer=tokenizer) return dict( train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator ) def find_all_linear_names(model, bits=4): cls = ( bnb.nn.Linear4bit if bits == 4 else (bnb.nn.Linear8bitLt if bits == 8 else torch.nn.Linear) ) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split(".") lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if "lm_head" in lora_module_names: # needed for 16-bit lora_module_names.remove("lm_head") return list(lora_module_names) def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def train(): parser = transformers.HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments) ) model_args, data_args, training_args = parser.parse_args_into_dataclasses() training_args.ddp_find_unused_parameters = False set_seed(42)
model_config = CamelidaeConfig.from_pretrained(model_args.model_name_or_path)
0
2023-12-22 02:54:29+00:00
16k
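The fields above close out one record of this dump: the cropped context window shown to the model, the full file source, the held-out next_line, the gold_snippet_index, the creation timestamp, and the context-length bucket (16k). As a minimal sketch only, assuming the rows are serialized one JSON object per line (the field names follow the schema listed at the head of this dump; the file name records.jsonl and the loading code are assumptions, not part of the dataset), a record of this shape could be read like this:

import json

# Assumption: each record of this dump is stored as one JSON object per line.
with open("records.jsonl") as f:
    record = json.loads(f.readline())

# Field names as listed in the schema at the top of the dump.
prompt = record["cropped_code"]    # context window that appears to end just before the target line
target = record["next_line"]       # held-out line to be completed
snippets = record["context"]       # list of {"identifier", "path", "snippet"} dicts
gold = snippets[record["gold_snippet_index"]]  # snippet the index appears to point at

print(record["repo_name"], record["file_path"], record["level"])
print("gold snippet:", gold["identifier"])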
lchen1019/Image_Cropper
ISAT/widgets/mainwindow.py
[ { "identifier": "Ui_MainWindow", "path": "ISAT/ui/MainWindow.py", "snippet": "class Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1280, 764)\n MainWindow.setMinimumSize(QtCore.QSize(800, 600))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n MainWindow.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/icons/icons/isat_bg_50x25.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n MainWindow.setWindowIcon(icon)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setSpacing(0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setEnabled(True)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1280, 24))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menubar.setFont(font)\n self.menubar.setAutoFillBackground(False)\n self.menubar.setDefaultUp(False)\n self.menubar.setNativeMenuBar(True)\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuFile.setFont(font)\n self.menuFile.setObjectName(\"menuFile\")\n self.menuView = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuView.setFont(font)\n self.menuView.setObjectName(\"menuView\")\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/icon/icons/翻译_translate.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n MainWindow.setMenuBar(self.menubar)\n\n self.menuTools = QtWidgets.QMenu(self.menubar)\n self.menuTools.setEnabled(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuTools.setFont(font)\n self.menuTools.setObjectName(\"menuTools\")\n\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.toolBar.setFont(font)\n self.toolBar.setIconSize(QtCore.QSize(24, 24))\n self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)\n self.toolBar.setFloatable(False)\n self.toolBar.setObjectName(\"toolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.dockWidgetContents_2 = QtWidgets.QWidget()\n self.dockWidgetContents_2.setObjectName(\"dockWidgetContents_2\")\n self.dockWidgetContents_3 = QtWidgets.QWidget()\n self.dockWidgetContents_3.setObjectName(\"dockWidgetContents_3\")\n self.files_dock = QtWidgets.QDockWidget(MainWindow)\n self.files_dock.setObjectName(\"files_dock\")\n self.dockWidgetContents = 
QtWidgets.QWidget()\n self.dockWidgetContents.setObjectName(\"dockWidgetContents\")\n self.files_dock.setWidget(self.dockWidgetContents)\n MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.files_dock)\n self.dockWidgetContents_4 = QtWidgets.QWidget()\n self.dockWidgetContents_4.setObjectName(\"dockWidgetContents_4\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.dockWidgetContents_4)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.actionOpen_dir = QtWidgets.QAction(MainWindow)\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/icon/icons/照片_pic.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionOpen_dir.setIcon(icon2)\n self.actionOpen_dir.setObjectName(\"actionOpen_dir\")\n self.actionZoom_in = QtWidgets.QAction(MainWindow)\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icon/icons/放大_zoom-in.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionZoom_in.setIcon(icon3)\n self.actionZoom_in.setObjectName(\"actionZoom_in\")\n self.actionZoom_out = QtWidgets.QAction(MainWindow)\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\":/icon/icons/缩小_zoom-out.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionZoom_out.setIcon(icon4)\n self.actionZoom_out.setObjectName(\"actionZoom_out\")\n self.actionFit_wiondow = QtWidgets.QAction(MainWindow)\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\":/icon/icons/全宽_fullwidth.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionFit_wiondow.setIcon(icon5)\n self.actionFit_wiondow.setObjectName(\"actionFit_wiondow\")\n self.actionSetting = QtWidgets.QAction(MainWindow)\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(\":/icon/icons/设置_setting-two.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSetting.setIcon(icon6)\n self.actionSetting.setObjectName(\"actionSetting\")\n self.actionExit = QtWidgets.QAction(MainWindow)\n icon7 = QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(\":/icon/icons/开关_power.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionExit.setIcon(icon7)\n self.actionExit.setObjectName(\"actionExit\")\n self.actionSave_dir = QtWidgets.QAction(MainWindow)\n icon8 = QtGui.QIcon()\n icon8.addPixmap(QtGui.QPixmap(\":/icon/icons/文件夹-开_folder-open.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSave_dir.setIcon(icon8)\n self.actionSave_dir.setObjectName(\"actionSave_dir\")\n self.actionSave = QtWidgets.QAction(MainWindow)\n icon9 = QtGui.QIcon()\n icon9.addPixmap(QtGui.QPixmap(\":/icon/icons/保存_save.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSave.setIcon(icon9)\n self.actionSave.setObjectName(\"actionSave\")\n self.actionPrev = QtWidgets.QAction(MainWindow)\n self.actionPrev.setCheckable(False)\n icon10 = QtGui.QIcon()\n icon10.addPixmap(QtGui.QPixmap(\":/icon/icons/上一步_back.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionPrev.setIcon(icon10)\n self.actionPrev.setMenuRole(QtWidgets.QAction.TextHeuristicRole)\n self.actionPrev.setPriority(QtWidgets.QAction.NormalPriority)\n self.actionPrev.setObjectName(\"actionPrev\")\n self.actionNext = QtWidgets.QAction(MainWindow)\n icon11 = QtGui.QIcon()\n icon11.addPixmap(QtGui.QPixmap(\":/icon/icons/下一步_next.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionNext.setIcon(icon11)\n self.actionNext.setObjectName(\"actionNext\")\n self.actionShortcut = QtWidgets.QAction(MainWindow)\n icon12 = QtGui.QIcon()\n icon12.addPixmap(QtGui.QPixmap(\":/icon/icons/键盘_keyboard-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionShortcut.setIcon(icon12)\n 
self.actionShortcut.setObjectName(\"actionShortcut\")\n self.actionAbout = QtWidgets.QAction(MainWindow)\n icon13 = QtGui.QIcon()\n icon13.addPixmap(QtGui.QPixmap(\":/icon/icons/我的_me.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionAbout.setIcon(icon13)\n self.actionAbout.setObjectName(\"actionAbout\")\n self.actionDelete = QtWidgets.QAction(MainWindow)\n self.actionDelete.setEnabled(False)\n icon15 = QtGui.QIcon()\n icon15.addPixmap(QtGui.QPixmap(\":/icon/icons/删除_delete.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionDelete.setIcon(icon15)\n self.actionDelete.setObjectName(\"actionDelete\")\n self.actionBit_map = QtWidgets.QAction(MainWindow)\n self.actionBit_map.setCheckable(False)\n self.actionBit_map.setIcon(icon2)\n self.actionBit_map.setObjectName(\"actionBit_map\")\n self.actionEdit = QtWidgets.QAction(MainWindow)\n self.actionEdit.setEnabled(False)\n icon16 = QtGui.QIcon()\n icon16.addPixmap(QtGui.QPixmap(\":/icon/icons/编辑_edit.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionEdit.setIcon(icon16)\n self.actionEdit.setObjectName(\"actionEdit\")\n self.actionTo_top = QtWidgets.QAction(MainWindow)\n self.actionTo_top.setEnabled(False)\n icon17 = QtGui.QIcon()\n icon17.addPixmap(QtGui.QPixmap(\":/icon/icons/去顶部_to-top.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionTo_top.setIcon(icon17)\n self.actionTo_top.setObjectName(\"actionTo_top\")\n self.actionTo_bottom = QtWidgets.QAction(MainWindow)\n self.actionTo_bottom.setEnabled(False)\n icon18 = QtGui.QIcon()\n icon18.addPixmap(QtGui.QPixmap(\":/icon/icons/去底部_to-bottom.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionTo_bottom.setIcon(icon18)\n self.actionTo_bottom.setObjectName(\"actionTo_bottom\")\n self.actionChinese = QtWidgets.QAction(MainWindow)\n self.actionChinese.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionChinese.setFont(font)\n self.actionChinese.setObjectName(\"actionChinese\")\n self.actionEnglish = QtWidgets.QAction(MainWindow)\n self.actionEnglish.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionEnglish.setFont(font)\n self.actionEnglish.setObjectName(\"actionEnglish\")\n self.actionBackspace = QtWidgets.QAction(MainWindow)\n icon19 = QtGui.QIcon()\n icon19.addPixmap(QtGui.QPixmap(\":/icon/icons/删除_delete-two.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionBackspace.setIcon(icon19)\n self.actionBackspace.setObjectName(\"actionBackspace\")\n self.actionCancel = QtWidgets.QAction(MainWindow)\n icon20 = QtGui.QIcon()\n icon20.addPixmap(QtGui.QPixmap(\":/icon/icons/关闭_close-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionCancel.setIcon(icon20)\n self.actionCancel.setObjectName(\"actionCancel\")\n self.actionFinish = QtWidgets.QAction(MainWindow)\n icon21 = QtGui.QIcon()\n icon21.addPixmap(QtGui.QPixmap(\":/icon/icons/校验_check-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionFinish.setIcon(icon21)\n self.actionFinish.setObjectName(\"actionFinish\")\n self.actionPolygon = QtWidgets.QAction(MainWindow)\n icon22 = QtGui.QIcon()\n icon22.addPixmap(QtGui.QPixmap(\":/icon/icons/锚点_anchor.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionPolygon.setIcon(icon22)\n self.actionPolygon.setObjectName(\"actionPolygon\")\n self.actionVisible = QtWidgets.QAction(MainWindow)\n icon23 = QtGui.QIcon()\n icon23.addPixmap(QtGui.QPixmap(\":/icon/icons/眼睛_eyes.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n 
self.actionVisible.setIcon(icon23)\n self.actionVisible.setObjectName(\"actionVisible\")\n self.actionContour_Max_only = QtWidgets.QAction(MainWindow)\n self.actionContour_Max_only.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_Max_only.setFont(font)\n self.actionContour_Max_only.setObjectName(\"actionContour_Max_only\")\n self.actionContour_External = QtWidgets.QAction(MainWindow)\n self.actionContour_External.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_External.setFont(font)\n self.actionContour_External.setObjectName(\"actionContour_External\")\n self.actionContour_All = QtWidgets.QAction(MainWindow)\n self.actionContour_All.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_All.setFont(font)\n self.actionContour_All.setObjectName(\"actionContour_All\")\n self.actionModel_manage = QtWidgets.QAction(MainWindow)\n icon24 = QtGui.QIcon()\n icon24.addPixmap(QtGui.QPixmap(\":/icon/icons/列表_list-middle.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionModel_manage.setIcon(icon24)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionModel_manage.setFont(font)\n self.actionModel_manage.setObjectName(\"actionModel_manage\")\n self.actionConverter = QtWidgets.QAction(MainWindow)\n icon25 = QtGui.QIcon()\n icon25.addPixmap(QtGui.QPixmap(\":/icon/icons/转换文件夹1_folder-conversion-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionConverter.setIcon(icon25)\n self.actionConverter.setObjectName(\"actionConverter\")\n self.menuFile.addAction(self.actionOpen_dir)\n self.menuFile.addAction(self.actionSave_dir)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionPrev)\n self.menuFile.addAction(self.actionNext)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionSetting)\n self.menuFile.addAction(self.actionExit)\n self.menuView.addSeparator()\n self.menuView.addAction(self.actionZoom_in)\n self.menuView.addAction(self.actionZoom_out)\n self.menuView.addAction(self.actionFit_wiondow)\n self.menuView.addSeparator()\n self.menuView.addAction(self.actionBit_map)\n self.menuView.addSeparator()\n self.menuTools.addSeparator()\n self.menuTools.addAction(self.actionConverter)\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuView.menuAction())\n self.menubar.addAction(self.menuTools.menuAction())\n\n self.toolBar.addAction(self.actionPrev)\n self.toolBar.addAction(self.actionNext)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.actionPolygon)\n self.toolBar.addAction(self.actionFinish)\n self.toolBar.addAction(self.actionCancel)\n self.toolBar.addAction(self.actionSave)\n self.toolBar.addAction(self.actionDelete)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.actionZoom_in)\n self.toolBar.addAction(self.actionZoom_out)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ISAT\"))\n self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\n self.menuView.setTitle(_translate(\"MainWindow\", \"View\"))\n self.menuTools.setTitle(_translate(\"MainWindow\", \"Tools\"))\n self.toolBar.setWindowTitle(_translate(\"MainWindow\", \"toolBar\"))\n 
self.files_dock.setWindowTitle(_translate(\"MainWindow\", \"Files\"))\n self.actionOpen_dir.setText(_translate(\"MainWindow\", \"Images dir\"))\n self.actionOpen_dir.setStatusTip(_translate(\"MainWindow\", \"Open images dir.\"))\n self.actionZoom_in.setText(_translate(\"MainWindow\", \"Zoom in\"))\n self.actionZoom_in.setStatusTip(_translate(\"MainWindow\", \"Zoom in.\"))\n self.actionZoom_out.setText(_translate(\"MainWindow\", \"Zoom out\"))\n self.actionZoom_out.setStatusTip(_translate(\"MainWindow\", \"Zoom out.\"))\n self.actionFit_wiondow.setText(_translate(\"MainWindow\", \"Fit window\"))\n self.actionFit_wiondow.setToolTip(_translate(\"MainWindow\", \"Fit window\"))\n self.actionFit_wiondow.setStatusTip(_translate(\"MainWindow\", \"Fit window.\"))\n self.actionFit_wiondow.setShortcut(_translate(\"MainWindow\", \"F\"))\n self.actionSetting.setText(_translate(\"MainWindow\", \"Setting\"))\n self.actionSetting.setStatusTip(_translate(\"MainWindow\", \"Setting.\"))\n self.actionExit.setText(_translate(\"MainWindow\", \"Exit\"))\n self.actionExit.setToolTip(_translate(\"MainWindow\", \"Exit\"))\n self.actionExit.setStatusTip(_translate(\"MainWindow\", \"Exit.\"))\n self.actionSave_dir.setText(_translate(\"MainWindow\", \"Label dir\"))\n self.actionSave_dir.setStatusTip(_translate(\"MainWindow\", \"Open label dir.\"))\n self.actionSave.setText(_translate(\"MainWindow\", \"Save\"))\n self.actionSave.setStatusTip(_translate(\"MainWindow\", \"Save annotation.\"))\n self.actionSave.setShortcut(_translate(\"MainWindow\", \"S\"))\n self.actionPrev.setText(_translate(\"MainWindow\", \"Prev image\"))\n self.actionPrev.setToolTip(_translate(\"MainWindow\", \"Prev image\"))\n self.actionPrev.setStatusTip(_translate(\"MainWindow\", \"Prev image.\"))\n self.actionPrev.setShortcut(_translate(\"MainWindow\", \"A\"))\n self.actionNext.setText(_translate(\"MainWindow\", \"Next image\"))\n self.actionNext.setToolTip(_translate(\"MainWindow\", \"Next image\"))\n self.actionNext.setStatusTip(_translate(\"MainWindow\", \"Next image.\"))\n self.actionNext.setShortcut(_translate(\"MainWindow\", \"D\"))\n self.actionShortcut.setText(_translate(\"MainWindow\", \"Shortcut\"))\n self.actionAbout.setText(_translate(\"MainWindow\", \"About\"))\n self.actionDelete.setText(_translate(\"MainWindow\", \"Delete\"))\n self.actionDelete.setToolTip(_translate(\"MainWindow\", \"Delete polygon\"))\n self.actionDelete.setStatusTip(_translate(\"MainWindow\", \"Delete polygon.\"))\n self.actionDelete.setShortcut(_translate(\"MainWindow\", \"Del\"))\n self.actionBit_map.setText(_translate(\"MainWindow\", \"Bit map\"))\n self.actionBit_map.setStatusTip(_translate(\"MainWindow\", \"Show instance or segmeent state.\"))\n self.actionBit_map.setShortcut(_translate(\"MainWindow\", \"Space\"))\n self.actionEdit.setText(_translate(\"MainWindow\", \"Edit\"))\n self.actionEdit.setToolTip(_translate(\"MainWindow\", \"Edit polygon\"))\n self.actionEdit.setStatusTip(_translate(\"MainWindow\", \"Edit polygon attribute.\"))\n self.actionTo_top.setText(_translate(\"MainWindow\", \"To top\"))\n self.actionTo_top.setToolTip(_translate(\"MainWindow\", \"Move polygon to top layer\"))\n self.actionTo_top.setStatusTip(_translate(\"MainWindow\", \"Move polygon to top layer.\"))\n self.actionTo_top.setShortcut(_translate(\"MainWindow\", \"T\"))\n self.actionTo_bottom.setText(_translate(\"MainWindow\", \"To bottom\"))\n self.actionTo_bottom.setToolTip(_translate(\"MainWindow\", \"Move polygon to bottom layer\"))\n 
self.actionTo_bottom.setStatusTip(_translate(\"MainWindow\", \"Move polygon to bottom layer.\"))\n self.actionTo_bottom.setShortcut(_translate(\"MainWindow\", \"B\"))\n self.actionChinese.setText(_translate(\"MainWindow\", \"中文\"))\n self.actionEnglish.setText(_translate(\"MainWindow\", \"English\"))\n self.actionBackspace.setText(_translate(\"MainWindow\", \"Backspace\"))\n self.actionBackspace.setToolTip(_translate(\"MainWindow\", \"Backspace\"))\n self.actionBackspace.setStatusTip(_translate(\"MainWindow\", \"Backspace.\"))\n self.actionBackspace.setShortcut(_translate(\"MainWindow\", \"Z\"))\n self.actionCancel.setText(_translate(\"MainWindow\", \"Cancel\"))\n self.actionCancel.setToolTip(_translate(\"MainWindow\", \"Annotate canceled\"))\n self.actionCancel.setStatusTip(_translate(\"MainWindow\", \"Annotate canceled.\"))\n self.actionCancel.setShortcut(_translate(\"MainWindow\", \"Esc\"))\n self.actionFinish.setText(_translate(\"MainWindow\", \"Finish\"))\n self.actionFinish.setToolTip(_translate(\"MainWindow\", \"Annotate finished\"))\n self.actionFinish.setStatusTip(_translate(\"MainWindow\", \"Annotate finished.\"))\n self.actionFinish.setShortcut(_translate(\"MainWindow\", \"E\"))\n self.actionPolygon.setText(_translate(\"MainWindow\", \"Polygon\"))\n self.actionPolygon.setToolTip(_translate(\"MainWindow\", \"Draw polygon\"))\n self.actionPolygon.setStatusTip(_translate(\"MainWindow\", \"Accurately annotate by drawing polygon. \"))\n self.actionPolygon.setShortcut(_translate(\"MainWindow\", \"Q\"))\n self.actionVisible.setText(_translate(\"MainWindow\", \"Visible\"))\n self.actionVisible.setToolTip(_translate(\"MainWindow\", \"Visible\"))\n self.actionVisible.setStatusTip(_translate(\"MainWindow\", \"Visible.\"))\n self.actionVisible.setShortcut(_translate(\"MainWindow\", \"V\"))\n self.actionContour_Max_only.setText(_translate(\"MainWindow\", \"Max only\"))\n self.actionContour_Max_only.setStatusTip(_translate(\"MainWindow\", \"Max contour save only.\"))\n self.actionContour_Max_only.setWhatsThis(_translate(\"MainWindow\", \"Max contour save only.\"))\n self.actionContour_External.setText(_translate(\"MainWindow\", \"External\"))\n self.actionContour_External.setStatusTip(_translate(\"MainWindow\", \"External contour save only.\"))\n self.actionContour_External.setWhatsThis(_translate(\"MainWindow\", \"External contour save only.\"))\n self.actionContour_All.setText(_translate(\"MainWindow\", \"All\"))\n self.actionContour_All.setStatusTip(_translate(\"MainWindow\", \"All contour save.\"))\n self.actionContour_All.setWhatsThis(_translate(\"MainWindow\", \"All contour save.\"))\n self.actionModel_manage.setText(_translate(\"MainWindow\", \"Model manage\"))\n self.actionModel_manage.setStatusTip(_translate(\"MainWindow\", \"Model manage.\"))\n self.actionModel_manage.setWhatsThis(_translate(\"MainWindow\", \"Model manage.\"))\n self.actionConverter.setText(_translate(\"MainWindow\", \"Converter\"))" }, { "identifier": "FilesDockWidget", "path": "ISAT/widgets/files_dock_widget.py", "snippet": "class FilesDockWidget(QtWidgets.QWidget, Ui_Form):\n def __init__(self, mainwindow):\n super(FilesDockWidget, self).__init__()\n self.setupUi(self)\n self.mainwindow = mainwindow\n self.listWidget.clicked.connect(self.listwidget_doubleclick)\n self.lineEdit_jump.returnPressed.connect(self.mainwindow.jump_to)\n\n def generate_item_and_itemwidget(self, file_name):\n item = QtWidgets.QListWidgetItem()\n item.setSizeHint(QtCore.QSize(200, 30))\n item_widget = QtWidgets.QWidget()\n layout = 
QtWidgets.QHBoxLayout()\n layout.setContentsMargins(9, 1, 9, 1)\n\n state_color = QtWidgets.QLabel()\n state_color.setFixedWidth(5)\n state_color.setStyleSheet(\"background-color: {};\".format('#999999'))\n state_color.setObjectName('state_color')\n layout.addWidget(state_color)\n\n category = QtWidgets.QLabel(file_name)\n category.setObjectName('category')\n layout.addWidget(category)\n\n item_widget.setLayout(layout)\n return item, item_widget\n\n def update_widget(self):\n self.listWidget.clear()\n if self.mainwindow.files_list is None:\n return\n\n for file_path in self.mainwindow.files_list:\n _, file_name = os.path.split(file_path)\n item = QtWidgets.QListWidgetItem()\n item.setSizeHint(QtCore.QSize(200, 30))\n # item, item_widget = self.generate_item_and_itemwidget(file_name)\n\n item.setText(file_name)\n self.listWidget.addItem(item)\n # self.listWidget.setItemWidget(item, item_widget)\n\n self.label_all.setText('{}'.format(len(self.mainwindow.files_list)))\n\n def set_select(self, row):\n self.listWidget.setCurrentRow(row)\n\n def listwidget_doubleclick(self):\n row = self.listWidget.currentRow()\n self.mainwindow.current_index = row\n self.mainwindow.show_image(row)" }, { "identifier": "AnnotationScene", "path": "ISAT/widgets/canvas.py", "snippet": "class AnnotationScene(QtWidgets.QGraphicsScene):\n def __init__(self, mainwindow):\n super(AnnotationScene, self).__init__()\n self.mainwindow = mainwindow\n self.image_item:QtWidgets.QGraphicsPixmapItem = None\n self.image_data = None\n self.current_graph:QGraphicsRectItem = None\n self.mode = STATUSMode.VIEW\n self.click = CLICKMode.POSITIVE\n self.click_points = []\n\n self.mask_alpha = 0.5\n self.top_layer = 1\n\n self.guide_line_x:QtWidgets.QGraphicsLineItem = None\n self.guide_line_y:QtWidgets.QGraphicsLineItem = None\n\n # 拖动鼠标描点 \n self.last_draw_time = time.time()\n self.draw_interval = 0.15\n self.pressd = False\n\n def load_image(self, image_path:str):\n self.clear()\n\n self.image_data = np.array(Image.open(image_path))\n \n self.image_item = QtWidgets.QGraphicsPixmapItem()\n self.image_item.setZValue(0)\n self.addItem(self.image_item)\n self.image_item.setPixmap(QtGui.QPixmap(image_path))\n self.setSceneRect(self.image_item.boundingRect())\n \n def start_draw_polygon(self):\n if self.mode != STATUSMode.VIEW:\n return\n self.change_mode_to_create()\n if self.mode == STATUSMode.CREATE:\n self.start_draw()\n \n def start_draw(self):\n print('start_draw')\n self.current_graph = QGraphicsRectItem()\n self.addItem(self.current_graph)\n \n def change_mode_to_view(self):\n self.mode = STATUSMode.VIEW\n self.image_item.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))\n self.mainwindow.actionPrev.setEnabled(True)\n self.mainwindow.actionNext.setEnabled(True)\n\n self.mainwindow.actionPolygon.setEnabled(self.mainwindow.can_be_annotated)\n self.mainwindow.actionBackspace.setEnabled(False)\n self.mainwindow.actionFinish.setEnabled(False)\n self.mainwindow.actionCancel.setEnabled(False)\n\n self.mainwindow.actionEdit.setEnabled(False)\n self.mainwindow.actionDelete.setEnabled(False)\n self.mainwindow.actionSave.setEnabled(self.mainwindow.can_be_annotated)\n\n def change_mode_to_create(self):\n if self.image_item is None:\n return\n self.mode = STATUSMode.CREATE\n self.image_item.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))\n self.mainwindow.actionPrev.setEnabled(False)\n self.mainwindow.actionNext.setEnabled(False)\n\n self.mainwindow.actionPolygon.setEnabled(False)\n 
self.mainwindow.actionBackspace.setEnabled(True)\n self.mainwindow.actionFinish.setEnabled(True)\n self.mainwindow.actionCancel.setEnabled(True)\n\n self.mainwindow.actionEdit.setEnabled(False)\n self.mainwindow.actionDelete.setEnabled(False)\n self.mainwindow.actionSave.setEnabled(False)\n\n def finish_draw(self):\n print('finish_draw')\n print(self.click_points)\n\n if self.current_graph is None:\n self.click_points.clear()\n return\n \n # 保存当前矩形\n print(self.click_points)\n print(self.mainwindow.rects)\n rect = {\n \"point1-x\": self.click_points[0][0],\n \"point1-y\": self.click_points[0][1],\n \"point2-x\": self.click_points[1][0],\n \"point2-y\": self.click_points[1][1],\n }\n print(rect)\n self.mainwindow.rects.append(rect)\n\n # 删除当前绘制对象\n self.click_points.clear()\n self.removeItem(self.current_graph)\n self.current_graph = None\n\n self.change_mode_to_view()\n\n\n def cancel_draw(self):\n if self.current_graph is None:\n return\n self.removeItem(self.current_graph)\n self.current_graph = None\n self.change_mode_to_view()\n self.click_points.clear()\n \n\n def mousePressEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'):\n if self.mode == STATUSMode.VIEW:\n return\n sceneX, sceneY = event.scenePos().x(), event.scenePos().y()\n sceneX = 0 if sceneX < 0 else sceneX\n sceneX = self.width()-1 if sceneX > self.width()-1 else sceneX\n sceneY = 0 if sceneY < 0 else sceneY\n sceneY = self.height()-1 if sceneY > self.height()-1 else sceneY\n print(sceneX, sceneY)\n\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n print('left click')\n self.pressd = True\n\n if len(self.click_points) <= 2:\n self.click_points.append([sceneX, sceneY])\n\n if len(self.click_points) == 2:\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n p1 = self.click_points[0]\n p2 = self.click_points[1]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n super(AnnotationScene, self).mousePressEvent(event)\n\n # 拖动鼠标描点 \n def mouseReleaseEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'): \n self.pressd = False\n super(AnnotationScene, self).mouseReleaseEvent(event)\n \n def eventFilter(self, obj, event):\n if event.type() == QEvent.GraphicsSceneMouseMove and event.buttons() == Qt.LeftButton:\n self.mouseMoveEvent(event)\n return True\n return super(RectangleScene, self).eventFilter(obj, event)\n\n def mouseMoveEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'):\n # 拖动鼠标描点\n pos = event.scenePos()\n if pos.x() < 0: pos.setX(0)\n if pos.x() > self.width()-1: pos.setX(self.width()-1)\n if pos.y() < 0: pos.setY(0)\n if pos.y() > self.height()-1: pos.setY(self.height()-1)\n\n if len(self.click_points) == 1:\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n p1 = self.click_points[0]\n p2 = [pos.x(), pos.y()]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n else:\n return\n\n # 状态栏,显示当前坐标\n if self.image_data is not None:\n x, y = round(pos.x()), round(pos.y())\n self.mainwindow.labelCoord.setText('xy: ({:>4d},{:>4d})'.format(x, y))\n\n data = self.image_data[y][x]\n if self.image_data.ndim == 2:\n self.mainwindow.labelData.setText('pix: [{:^3d}]'.format(data))\n elif self.image_data.ndim == 3:\n if len(data) == 3:\n self.mainwindow.labelData.setText('rgb: [{:>3d},{:>3d},{:>3d}]'.format(data[0], data[1], data[2]))\n else:\n 
self.mainwindow.labelData.setText('pix: [{}]'.format(data))\n\n super(AnnotationScene, self).mouseMoveEvent(event)\n \n def show_all(self):\n print('show_all')\n\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n for rect in self.mainwindow.rects:\n self.current_graph = QGraphicsRectItem()\n self.addItem(self.current_graph)\n p1 = [rect[\"point1-x\"], rect[\"point1-y\"]]\n p2 = [rect[\"point2-x\"], rect[\"point2-y\"]]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n\n def hide_all(self):\n print('hide_all')\n items_to_remove = [item for item in self.items() if isinstance(item, QGraphicsRectItem)]\n for item in items_to_remove:\n self.removeItem(item)" }, { "identifier": "AnnotationView", "path": "ISAT/widgets/canvas.py", "snippet": "class AnnotationView(QtWidgets.QGraphicsView):\n def __init__(self, parent=None):\n super(AnnotationView, self).__init__(parent)\n self.setMouseTracking(True)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)\n self.setDragMode(QtWidgets.QGraphicsView.DragMode.ScrollHandDrag)\n self.factor = 1.2\n\n def wheelEvent(self, event: QtGui.QWheelEvent):\n angel = event.angleDelta()\n angelX, angelY = angel.x(), angel.y()\n point = event.pos() # 当前鼠标位置\n if angelY > 0:\n self.zoom(self.factor, point)\n else:\n self.zoom(1 / self.factor, point)\n\n def zoom_in(self):\n self.zoom(self.factor)\n\n def zoom_out(self):\n self.zoom(1/self.factor)\n\n def zoomfit(self):\n self.fitInView(0, 0, self.scene().width(), self.scene().height(), QtCore.Qt.AspectRatioMode.KeepAspectRatio)\n\n def zoom(self, factor, point=None):\n mouse_old = self.mapToScene(point) if point is not None else None\n # 缩放比例\n\n pix_widget = self.transform().scale(factor, factor).mapRect(QtCore.QRectF(0, 0, 1, 1)).width()\n if pix_widget > 30 and factor > 1: return\n if pix_widget < 0.01 and factor < 1: return\n\n self.scale(factor, factor)\n if point is not None:\n mouse_now = self.mapToScene(point)\n center_now = self.mapToScene(self.viewport().width() // 2, self.viewport().height() // 2)\n center_new = mouse_old - mouse_now + center_now\n self.centerOn(center_new)" }, { "identifier": "STATUSMode", "path": "ISAT/configs.py", "snippet": "class STATUSMode(Enum):\n VIEW = 0\n CREATE = 1\n EDIT = 2" }, { "identifier": "MAPMode", "path": "ISAT/configs.py", "snippet": "class MAPMode(Enum):\n LABEL = 0\n SEMANTIC = 1\n INSTANCE = 2" }, { "identifier": "load_config", "path": "ISAT/configs.py", "snippet": "def load_config(file):\n with open(file, 'rb')as f:\n cfg = yaml.load(f.read(), Loader=yaml.FullLoader)\n return cfg" }, { "identifier": "save_config", "path": "ISAT/configs.py", "snippet": "def save_config(cfg, file):\n s = yaml.dump(cfg)\n with open(file, 'w') as f:\n f.write(s)\n return True" }, { "identifier": "CONFIG_FILE", "path": "ISAT/configs.py", "snippet": "CONFIG_FILE = os.path.join(ISAT_ROOT, 'isat.yaml')" }, { "identifier": "DEFAULT_CONFIG_FILE", "path": "ISAT/configs.py", "snippet": "DEFAULT_CONFIG_FILE = os.path.join(ISAT_ROOT, 'default.yaml')" }, { "identifier": "CHECKPOINT_PATH", "path": "ISAT/configs.py", "snippet": "CHECKPOINT_PATH = os.path.join(ISAT_ROOT, 'checkpoints')" }, { "identifier": "ISAT_ROOT", "path": "ISAT/configs.py", "snippet": "ISAT_ROOT = os.path.split(os.path.abspath(__file__))[0]" }, { "identifier": "Object", "path": "ISAT/annotation.py", 
"snippet": "class Object:\n def __init__(self, category:str, group:int, segmentation, area, layer, bbox, iscrowd=0, note=''):\n self.category = category\n self.group = group\n self.segmentation = segmentation\n self.area = area\n self.layer = layer\n self.bbox = bbox\n self.iscrowd = iscrowd\n self.note = note" }, { "identifier": "Annotation", "path": "ISAT/annotation.py", "snippet": "class Annotation:\n def __init__(self, image_path, label_path):\n img_folder, img_name = os.path.split(image_path)\n self.description = 'ISAT'\n self.img_folder = img_folder\n self.img_name = img_name\n self.label_path = label_path\n self.note = ''\n\n image = np.array(Image.open(image_path))\n if image.ndim == 3:\n self.height, self.width, self.depth = image.shape\n elif image.ndim == 2:\n self.height, self.width = image.shape\n self.depth = 0\n else:\n self.height, self.width, self.depth = image.shape[:, :3]\n print('Warning: Except image has 2 or 3 ndim, but get {}.'.format(image.ndim))\n del image\n\n self.objects:List[Object,...] = []\n\n def load_annotation(self):\n if os.path.exists(self.label_path):\n with open(self.label_path, 'r') as f:\n dataset = load(f)\n info = dataset.get('info', {})\n description = info.get('description', '')\n if description == 'ISAT':\n # ISAT格式json\n objects = dataset.get('objects', [])\n self.img_name = info.get('name', '')\n width = info.get('width', None)\n if width is not None:\n self.width = width\n height = info.get('height', None)\n if height is not None:\n self.height = height\n depth = info.get('depth', None)\n if depth is not None:\n self.depth = depth\n self.note = info.get('note', '')\n for obj in objects:\n category = obj.get('category', 'unknow')\n group = obj.get('group', 0)\n if group is None: group = 0\n segmentation = obj.get('segmentation', [])\n iscrowd = obj.get('iscrowd', 0)\n note = obj.get('note', '')\n area = obj.get('area', 0)\n layer = obj.get('layer', 2)\n bbox = obj.get('bbox', [])\n obj = Object(category, group, segmentation, area, layer, bbox, iscrowd, note)\n self.objects.append(obj)\n else:\n # 不再支持直接打开labelme标注文件(在菜单栏-tool-convert中提供了isat<->labelme相互转换工具)\n print('Warning: The file {} is not a ISAT json.'.format(self.label_path))\n return self\n\n def save_annotation(self):\n dataset = {}\n dataset['info'] = {}\n dataset['info']['description'] = self.description\n dataset['info']['folder'] = self.img_folder\n dataset['info']['name'] = self.img_name\n dataset['info']['width'] = self.width\n dataset['info']['height'] = self.height\n dataset['info']['depth'] = self.depth\n dataset['info']['note'] = self.note\n dataset['objects'] = []\n for obj in self.objects:\n object = {}\n object['category'] = obj.category\n object['group'] = obj.group\n object['segmentation'] = obj.segmentation\n object['area'] = obj.area\n object['layer'] = obj.layer\n object['bbox'] = obj.bbox\n object['iscrowd'] = obj.iscrowd\n object['note'] = obj.note\n dataset['objects'].append(object)\n with open(self.label_path, 'w') as f:\n dump(dataset, f, indent=4)\n return True" }, { "identifier": "Polygon", "path": "ISAT/widgets/polygon.py", "snippet": "class Polygon(QtWidgets.QGraphicsPolygonItem):\n def __init__(self):\n super(Polygon, self).__init__(parent=None)\n self.line_width = 0\n self.hover_alpha = 150\n self.nohover_alpha = 80\n self.points = []\n self.vertexs = []\n self.category = ''\n self.group = 0\n self.iscrowd = 0\n self.note = ''\n\n self.rxmin, self.rxmax, self.rymin, self.rymax = 0, 0, 0, 0 # 用于绘画完成后,记录多边形的各边界,此处与points对应\n self.color = 
QtGui.QColor('#ff0000')\n self.is_drawing = True\n\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(QtGui.QBrush(self.color, QtCore.Qt.BrushStyle.FDiagPattern))\n\n self.setAcceptHoverEvents(True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)\n self.setZValue(1e5)\n\n def addPoint(self, point):\n print('addPoint')\n self.points.append(point)\n print(self.points)\n vertex = Vertex(self, self.color, 2)\n # 添加路径点\n self.scene().addItem(vertex)\n self.vertexs.append(vertex)\n vertex.setPos(point)\n\n def movePoint(self, index, point):\n if not 0 <= index < len(self.points):\n return\n self.points[index] = self.mapFromScene(point)\n\n self.redraw()\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n def removePoint(self, index):\n if not self.points:\n return\n self.points.pop(index)\n vertex = self.vertexs.pop(index)\n self.scene().removeItem(vertex)\n del vertex\n self.redraw()\n\n def delete(self):\n self.points.clear()\n while self.vertexs:\n vertex = self.vertexs.pop()\n self.scene().removeItem(vertex)\n del vertex\n\n def moveVertex(self, index, point):\n if not 0 <= index < len(self.vertexs):\n return\n vertex = self.vertexs[index]\n vertex.setEnabled(False)\n vertex.setPos(point)\n vertex.setEnabled(True)\n\n def itemChange(self, change: 'QGraphicsItem.GraphicsItemChange', value: typing.Any):\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged and not self.is_drawing: # 选中改变\n if self.isSelected():\n color = QtGui.QColor('#00A0FF')\n color.setAlpha(self.hover_alpha)\n self.setBrush(color)\n else:\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n self.scene().mainwindow.annos_dock_widget.set_selected(self) # 更新label面板\n\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange: # ItemPositionHasChanged\n bias = value\n l, t, b, r = self.boundingRect().left(), self.boundingRect().top(), self.boundingRect().bottom(), self.boundingRect().right()\n if l + bias.x() < 0: bias.setX(-l)\n if r + bias.x() > self.scene().width(): bias.setX(self.scene().width()-r)\n if t + bias.y() < 0: bias.setY(-t)\n if b + bias.y() > self.scene().height(): bias.setY(self.scene().height()-b)\n\n for index, point in enumerate(self.points):\n self.moveVertex(index, point+bias)\n\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n return super(Polygon, self).itemChange(change, value)\n\n def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not self.isSelected():\n self.color.setAlpha(self.hover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not self.isSelected():\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def mouseDoubleClickEvent(self, event: 'QGraphicsSceneMouseEvent'):\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n self.scene().mainwindow.category_edit_widget.polygon = self\n self.scene().mainwindow.category_edit_widget.load_cfg()\n self.scene().mainwindow.category_edit_widget.show()\n\n def redraw(self):\n if len(self.points) < 1:\n 
return\n xs = [p.x() for p in self.points]\n ys = [p.y() for p in self.points]\n self.rxmin, self.rymin, self.rxmax, self.rymax = min(xs), min(ys), max(xs), max(ys)\n self.setPolygon(QtGui.QPolygonF(self.points))\n\n def change_color(self, color):\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n for vertex in self.vertexs:\n vertex_color = self.color\n vertex_color.setAlpha(255)\n vertex.setPen(QtGui.QPen(vertex_color, self.line_width))\n vertex.setBrush(vertex_color)\n\n def set_drawed(self, category, group, iscrowd, note, color:QtGui.QColor, layer=None):\n self.is_drawing = False\n self.category = category\n if isinstance(group, str):\n group = 0 if group == '' else int(group)\n self.group = group\n self.iscrowd = iscrowd\n self.note = note\n\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n if layer is not None:\n self.setZValue(layer)\n for vertex in self.vertexs:\n vertex.setColor(color)\n\n def calculate_area(self):\n area = 0\n num_points = len(self.points)\n for i in range(num_points):\n p1 = self.points[i]\n p2 = self.points[(i + 1) % num_points]\n d = p1.x() * p2.y() - p2.x() * p1.y()\n area += d\n return abs(area) / 2\n\n def load_object(self, object):\n segmentation = object.segmentation\n for x, y in segmentation:\n point = QtCore.QPointF(x, y)\n self.addPoint(point)\n color = self.scene().mainwindow.category_color_dict.get(object.category, '#000000')\n self.set_drawed(object.category, object.group, object.iscrowd, object.note, QtGui.QColor(color), object.layer) # ...\n\n def to_object(self):\n if self.is_drawing:\n return None\n segmentation = []\n for point in self.points:\n point = point + self.pos()\n segmentation.append((round(point.x(), 2), round(point.y(), 2)))\n xmin = self.boundingRect().x() + self.pos().x()\n ymin = self.boundingRect().y() + self.pos().y()\n xmax = xmin + self.boundingRect().width()\n ymax = ymin + self.boundingRect().height()\n\n object = Object(self.category, group=self.group, segmentation=segmentation,\n area=self.calculate_area(), layer=self.zValue(), bbox=(xmin, ymin, xmax, ymax), iscrowd=self.iscrowd, note=self.note)\n return object" }, { "identifier": "PromptPoint", "path": "ISAT/widgets/polygon.py", "snippet": "class PromptPoint(QtWidgets.QGraphicsPathItem):\n def __init__(self, pos, type=0):\n super(PromptPoint, self).__init__()\n self.color = QtGui.QColor('#0000FF') if type==0 else QtGui.QColor('#00FF00')\n self.color.setAlpha(255)\n self.painterpath = QtGui.QPainterPath()\n self.painterpath.addEllipse(\n QtCore.QRectF(-1, -1, 2, 2))\n self.setPath(self.painterpath)\n self.setBrush(self.color)\n self.setPen(QtGui.QPen(self.color, 3))\n self.setZValue(1e5)\n\n self.setPos(pos)" }, { "identifier": "ConverterDialog", "path": "ISAT/widgets/converter_dialog.py", "snippet": "class ConverterDialog(QtWidgets.QDialog, Ui_Dialog):\n def __init__(self, parent, mainwindow):\n super(ConverterDialog, self).__init__(parent=parent)\n self.setWindowTitle('转换')\n self.layout = QVBoxLayout()\n self.mainwindow = mainwindow\n self.setWindowModality(QtCore.Qt.WindowModality.WindowModal)\n\n self.path_layout = QHBoxLayout()\n self.button = QPushButton('保存至')\n self.button.clicked.connect(self.select_folder)\n self.path_layout.addWidget(self.button)\n self.path_text = QLineEdit()\n self.path_text.setReadOnly(True)\n self.path_layout.addWidget(self.path_text)\n 
self.layout.addLayout(self.path_layout)\n\n\n # 最底部居中按钮\n self.bottom_layout = QHBoxLayout()\n self.bottom_layout.addStretch()\n self.bottom_button = QPushButton('转换')\n self.bottom_layout.addWidget(self.bottom_button)\n self.bottom_layout.addStretch()\n self.layout.addLayout(self.bottom_layout)\n self.bottom_button.clicked.connect(self.confirm_action)\n self.setLayout(self.layout)\n\n def select_folder(self):\n folder = QFileDialog.getExistingDirectory(self, '保存至')\n if folder:\n self.path_text.setText(folder)\n\n def confirm_action(self):\n path = self.path_text.text()\n if path == '':\n self.mainwindow.statusBar().showMessage('请先选择保存路径')\n QMessageBox.warning(self, '警告', '请先选择保存路径')\n return\n if not os.path.exists(path):\n os.makedirs(path)\n self.mainwindow.statusBar().showMessage('正在转换')\n labels_dir = self.mainwindow.label_root\n image_dir = self.mainwindow.image_root\n for inx, label in enumerate(os.listdir(labels_dir)):\n print(inx, label)\n label_path = os.path.join(labels_dir, label)\n image_path = os.path.join(image_dir, label[:-5] + '.jpg')\n if not os.path.exists(image_path):\n image_path = os.path.join(image_dir, label[:-5] + '.png')\n if not os.path.exists(image_path):\n image_path = os.path.join(image_dir, label[:-5] + '.jpeg')\n if not os.path.exists(image_path):\n continue\n image = Image.open(image_path)\n with open(label_path, 'r') as f:\n rects = json.load(f)\n \n for inx, rect in enumerate(rects):\n x1, y1, x2, y2 = rect['point1-x'], rect['point1-y'], rect['point2-x'], rect['point2-y']\n left = min(x1, x2)\n right = max(x1, x2)\n top = min(y1, y2)\n bottom = max(y1, y2)\n cropped_image = image.crop((left, top, right, bottom))\n save_path = os.path.join(path, label[:-5] + '_' + str(inx) + image_path[-4:])\n print(save_path)\n cropped_image.save(save_path)\n\n self.mainwindow.statusBar().showMessage('转换完成')\n QMessageBox.warning(self, '提示', '转换完成')" } ]
from PyQt5 import QtWidgets, QtCore, QtGui from ISAT.ui.MainWindow import Ui_MainWindow from ISAT.widgets.files_dock_widget import FilesDockWidget from ISAT.widgets.canvas import AnnotationScene, AnnotationView from ISAT.configs import STATUSMode, MAPMode, load_config, save_config, CONFIG_FILE, DEFAULT_CONFIG_FILE, CHECKPOINT_PATH, ISAT_ROOT from ISAT.annotation import Object, Annotation from ISAT.widgets.polygon import Polygon, PromptPoint from ISAT.widgets.converter_dialog import ConverterDialog from PIL import Image from PyQt5.QtCore import QThread, pyqtSignal import os import json import functools import imgviz import ISAT.icons_rc import numpy as np import cv2 # 调整图像饱和度
12,578
12578
# -*- coding: utf-8 -*- # @Author : LG class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self): super(MainWindow, self).__init__() self.setupUi(self) self.image_root: str = None self.label_root:str = None self.files_list: list = [] self.current_index = None self.current_file_index: int = None self.current_group = 1 self.config_file = CONFIG_FILE if os.path.exists(CONFIG_FILE) else DEFAULT_CONFIG_FILE self.saved = True self.can_be_annotated = True self.load_finished = False self.png_palette = None # 图像拥有调色盘,说明是单通道的标注png文件 self.instance_cmap = imgviz.label_colormap() # 标注目标
# -*- coding: utf-8 -*- # @Author : LG class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self): super(MainWindow, self).__init__() self.setupUi(self) self.image_root: str = None self.label_root:str = None self.files_list: list = [] self.current_index = None self.current_file_index: int = None self.current_group = 1 self.config_file = CONFIG_FILE if os.path.exists(CONFIG_FILE) else DEFAULT_CONFIG_FILE self.saved = True self.can_be_annotated = True self.load_finished = False self.png_palette = None # 图像拥有调色盘,说明是单通道的标注png文件 self.instance_cmap = imgviz.label_colormap() # 标注目标
self.current_label:Annotation = None
13
2023-12-24 16:19:16+00:00
16k
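In this second record the pieces line up in a way that makes the field semantics easier to see: gold_snippet_index is 13, and the fourteenth entry of context is the Annotation class from ISAT/annotation.py, which is exactly the symbol used in the record's next_line (self.current_label:Annotation = None). Under that reading, a record is scored by asking a completion model for the line that follows cropped_code and comparing it against next_line. The sketch below is an assumption about that workflow rather than an official evaluation harness; model_complete is a hypothetical stand-in for whatever completion model is being evaluated.

def evaluate_record(record, model_complete):
    # model_complete (hypothetical) maps a code prefix to its predicted next line.
    prompt = record["cropped_code"]
    prediction = model_complete(prompt).strip()
    reference = record["next_line"].strip()
    return prediction == reference  # exact match on the held-out line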
khabbazan/Mattermost-Subscriptions
helpers/channels_graphql_ws/subscription.py
[ { "identifier": "GraphqlWsConsumer", "path": "helpers/channels_graphql_ws/graphql_ws_consumer.py", "snippet": "class GraphqlWsConsumer(ch_websocket.AsyncJsonWebsocketConsumer):\n \"\"\"Channels consumer for the WebSocket GraphQL backend.\n\n NOTE: Each instance of this class maintains one WebSocket\n connection to a single client.\n\n This class implements the WebSocket-based GraphQL protocol used by\n `subscriptions-transport-ws` library (used by Apollo):\n https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md\n \"\"\"\n\n # ----------------------------------------------------------------- PUBLIC INTERFACE\n\n # Overwrite this in the subclass to specify the GraphQL schema which\n # processes GraphQL queries.\n schema: graphene.Schema\n\n # The interval to send keepalive messages to the clients (seconds).\n send_keepalive_every: Optional[float] = None\n\n # Set to `True` to process requests (i.e. GraphQL documents) from\n # a client in order of arrival, which is the same as sending order,\n # as guaranteed by the WebSocket protocol. This means that request\n # processing for this particular client becomes serial - in other\n # words, the server will not start processing another request\n # before it finishes the current one. Note that requests from\n # different clients (within different WebSocket connections)\n # are still processed asynchronously. Useful for tests.\n strict_ordering: bool = False\n\n # When set to `True` the server will send an empty data message in\n # response to the subscription. This is needed to let client know\n # when the subscription activates, so he can be sure he doesn't miss\n # any notifications. Disabled by default, cause this is an extension\n # to the original protocol and the client must be tuned accordingly.\n confirm_subscriptions: bool = False\n\n # The message sent to the client when subscription activation\n # confirmation is enabled.\n subscription_confirmation_message: Dict[str, Any] = {\"data\": None, \"errors\": None}\n\n # Issue a warning to the log when operation takes longer than\n # specified number in seconds. None disables the warning.\n warn_operation_timeout: Optional[float] = 1\n\n # The size of the subscription notification queue. If there are more\n # notifications (for a single subscription) than the given number,\n # then an oldest notification is dropped and a warning is logged.\n subscription_notification_queue_limit: int = 1024\n\n # GraphQL middleware.\n # Instance of `graphql.MiddlewareManager` or the list of functions\n # (callables) like the following:\n # ```python\n # async def my_middleware(next_middleware, root, info, *args, **kwds):\n # result = next_middleware(root, info, *args, **kwds)\n # if graphql.pyutils.is_awaitable(result):\n # result = await result\n # return result\n # ```\n # The first middleware in the middlewares list will be the closest\n # to the resolver in the middlewares call stack.\n # For more information read docs:\n # - https://docs.graphene-python.org/en/latest/execution/middleware/#middleware\n # - https://graphql-core-3.readthedocs.io/en/latest/diffs.html#custom-middleware\n # Docs about async middlewares are still missing - read the\n # GraphQL-core sources to know more.\n middleware: Optional[graphql.Middleware] = None\n\n async def on_connect(self, payload):\n \"\"\"Client connection handler.\n\n Called after CONNECTION_INIT message from client. 
Overwrite and\n raise an Exception to tell the server to reject the connection\n when it's necessary.\n\n Args:\n payload: Payload from CONNECTION_INIT message.\n \"\"\"\n del payload\n\n async def on_operation(self, op_id, payload):\n \"\"\"Process business logic before operation processing starts.\n\n Useful e.g. to check that user session is not yet expired.\n\n Throw `graphql.error.GraphQLError` to cancel the operation.\n\n Args:\n op_id: Operation id.\n payload: Payload of the operation.\n \"\"\"\n del op_id, payload\n\n # ------------------------------------------------------------------- IMPLEMENTATION\n\n # A prefix of Channel groups with subscription notifications.\n group_name_prefix: str = \"GQLWS\"\n\n # Structure that holds subscription information.\n @dataclasses.dataclass\n class _SubInf:\n \"\"\"Subscription information structure.\"\"\"\n\n # Subscription identifier - protocol operation identifier.\n sid: int\n # Subscription groups the subscription belongs to.\n groups: List[str]\n # A function which triggets subscription.\n enqueue_notification: Callable[[Any], None]\n # The callback to invoke when client unsubscribes.\n unsubscribed_callback: Callable[..., Awaitable[None]]\n\n def __init__(self, *args, **kwargs):\n \"\"\"Consumer constructor.\"\"\"\n\n assert self.schema is not None, \"An attribute 'schema' is not set! Subclasses must specify \" \"the schema which processes GraphQL subscription queries.\"\n\n # Registry of active (subscribed) subscriptions.\n self._subscriptions: Dict[int, GraphqlWsConsumer._SubInf] = {} # {'<sid>': '<SubInf>', ...}\n self._sids_by_group = {} # {'<grp>': ['<sid0>', '<sid1>', ...], ...}\n\n # Tasks which send notifications to clients indexed by an\n # operation/subscription id.\n self._notifier_tasks: Dict[int, asyncio.Task] = {}\n\n # Task that sends keepalive messages periodically.\n self._keepalive_task = None\n\n # Background tasks to clean it up when a client disconnects.\n # We use weak collection so finished task will be autoremoved.\n self._background_tasks: weakref.WeakSet = weakref.WeakSet()\n\n # Crafty weak collection with per-operation locks. It holds a\n # mapping from the operaion id (protocol message id) to the\n # `asyncio.Lock` used to serialize processing of start & stop\n # requests. Since the collection is weak, it automatically\n # throws away items when locks are garbage collected.\n self._operation_locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary()\n\n # MiddlewareManager maintains internal cache for resolvers\n # wrapped with middlewares. Using the same manager for all\n # operations improves performance.\n self._middleware = None\n if self.middleware:\n self._middleware = self.middleware\n if not isinstance(self._middleware, graphql.MiddlewareManager):\n self._middleware = graphql.MiddlewareManager(*self._middleware)\n\n super().__init__(*args, **kwargs)\n\n # ---------------------------------------------------------- CONSUMER EVENT HANDLERS\n\n async def connect(self):\n \"\"\"Handle new WebSocket connection.\"\"\"\n\n # Check the subprotocol told by the client.\n #\n # NOTE: In Python 3.6 `scope[\"subprotocols\"]` was a string, but\n # starting with Python 3.7 it is a bytes. This can be a proper\n # change or just a bug in the Channels to be fixed. 
So let's\n # accept both variants until it becomes clear.\n assert GRAPHQL_WS_SUBPROTOCOL in ((sp.decode() if isinstance(sp, bytes) else sp) for sp in self.scope[\"subprotocols\"]), (\n f\"WebSocket client does not request for the subprotocol \" f\"{GRAPHQL_WS_SUBPROTOCOL}!\"\n )\n\n # Accept connection with the GraphQL-specific subprotocol.\n await self.accept(subprotocol=GRAPHQL_WS_SUBPROTOCOL)\n\n async def disconnect(self, code):\n \"\"\"Handle WebSocket disconnect.\n\n Remove itself from the Channels groups, clear triggers and stop\n sending keepalive messages.\n \"\"\"\n\n # Print debug or warning message depending on the value of the\n # connection close code. We consider all reserved codes (<999),\n # 1000 \"Normal Closure\", and 1001 \"Going Away\" as OK.\n # See: https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent\n if not code:\n LOG.warning(\"WebSocket connection closed without a code!\")\n elif code <= 1001:\n LOG.debug(\"WebSocket connection closed with code: %s.\", code)\n else:\n LOG.warning(\"WebSocket connection closed with code: %s!\", code)\n\n # The list of awaitables to simultaneously wait at the end.\n waitlist: List[asyncio.Task] = []\n\n # Unsubscribe from the Channels groups.\n waitlist += [asyncio.create_task(self._channel_layer.group_discard(group, self.channel_name)) for group in self._sids_by_group]\n\n # Cancel all currently running background tasks.\n for bg_task in self._background_tasks:\n bg_task.cancel()\n waitlist += list(self._background_tasks)\n\n # Stop sending keepalive messages (if enabled).\n if self._keepalive_task is not None:\n self._keepalive_task.cancel()\n waitlist += [self._keepalive_task]\n\n # Stop tasks which listen to GraphQL lib and send notifications.\n for notifier_task in self._notifier_tasks.values():\n notifier_task.cancel()\n waitlist += [notifier_task]\n\n # Wait for tasks to stop.\n if waitlist:\n await asyncio.wait(waitlist)\n\n self._background_tasks.clear()\n self._keepalive_task = None\n self._notifier_tasks.clear()\n self._operation_locks.clear()\n self._sids_by_group.clear()\n self._subscriptions.clear()\n\n async def receive_json(self, content): # pylint: disable=arguments-differ\n \"\"\"Process WebSocket message received from the client.\n\n NOTE: We force 'STOP' message processing to wait until 'START'\n with the same operation id finishes (if it is running). This\n protects us from race conditions which may happen when a client\n stops operation immediately after starting it. An illustrative\n example is a subscribe-unsubscribe pair. If we spawn processing\n of both messages concurrently we can deliver subscription\n confirmation after unsubscription confirmation.\n \"\"\"\n\n # Extract message type based on which we select how to proceed.\n msg_type = content[\"type\"].upper()\n\n if msg_type == \"CONNECTION_INIT\":\n task = self._on_gql_connection_init(payload=content[\"payload\"])\n\n elif msg_type == \"CONNECTION_TERMINATE\":\n task = self._on_gql_connection_terminate()\n\n elif msg_type == \"START\":\n op_id = content[\"id\"]\n\n # Create and lock a mutex for this particular operation id,\n # so STOP processing for the same operation id will wait\n # until START processing finishes. 
Locks are stored in a\n # weak collection so we do not have to manually clean it up.\n if op_id in self._operation_locks:\n raise graphql.error.GraphQLError(f\"Operation with msg_id={op_id} is already running!\")\n op_lock = asyncio.Lock()\n self._operation_locks[op_id] = op_lock\n await op_lock.acquire()\n\n async def on_start():\n try:\n # User hook which raises to cancel processing.\n await self.on_operation(op_id, payload=content[\"payload\"])\n # START message processing.\n await self._on_gql_start(op_id, payload=content[\"payload\"])\n except Exception as ex: # pylint: disable=broad-except\n await self._send_gql_error(op_id, ex)\n finally:\n op_lock.release()\n\n task = on_start()\n\n elif msg_type == \"STOP\":\n op_id = content[\"id\"]\n\n async def on_stop():\n # Wait until START message processing finishes, if any.\n async with self._operation_locks.setdefault(op_id, asyncio.Lock()):\n await self._on_gql_stop(op_id)\n\n task = on_stop()\n\n else:\n task = self._send_gql_error(\n content[\"id\"] if \"id\" in content else None,\n Exception(f\"Wrong message type '{msg_type}'!\"),\n )\n\n # If strict ordering is required then simply wait until the\n # message processing finishes. Otherwise spawn a task so\n # Channels may continue calling `receive_json` while requests\n # (i.e. GraphQL documents) are being processed.\n if self.strict_ordering:\n await task\n else:\n self._spawn_background_task(task)\n\n async def broadcast(self, message):\n \"\"\"The broadcast message handler.\n\n Method is called when new `broadcast` message (sent by\n `Subscription.broadcast`) received from the Channels group.\n\n \"\"\"\n # If strict ordering is required then simply wait until all the\n # broadcast messages are sent. Otherwise spawn a task so this\n # consumer will continue receiving messages.\n if self.strict_ordering:\n await self._process_broadcast(message)\n else:\n self._spawn_background_task(self._process_broadcast(message))\n\n async def _process_broadcast(self, message):\n \"\"\"Process the broadcast message.\n\n This triggers subscription notification to all the subscriptions\n belonging to the group received in the `message`.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task by the `broadcast` method (message handler).\n \"\"\"\n group = message[\"group\"]\n\n # Do nothing if group does not exist. It is quite possible for\n # a client and a backend to concurrently unsubscribe and send\n # notification. And these events do not need to be synchronized.\n if group not in self._sids_by_group:\n return\n\n payload = message[\"payload\"]\n\n # Put the payload to the notification queues of subscriptions\n # belonging to the subscription group. Drop the oldest payloads\n # if the `notification_queue` is full.\n for sid in self._sids_by_group[group]:\n subinf = self._subscriptions[sid]\n subinf.enqueue_notification(payload)\n\n async def unsubscribe(self, message):\n \"\"\"The unsubscribe message handler.\n\n Method is called when new `unsubscribe` message received from\n the Channels group. The message is typically sent by the method\n `Subscription.unsubscribe`. Here we figure out the group message\n received from and stop all the subscriptions in this group.\n \"\"\"\n group = message[\"group\"]\n\n # Do nothing if group does not exist. It is quite possible for\n # a client and a backend to unsubscribe from a subscription\n # concurrently. 
And these events do not need to be synchronized.\n if group not in self._sids_by_group:\n return\n\n # Send messages which look like user unsubscribes from all\n # subscriptions in the subscription group. This saves us from\n # thinking about raise condition between subscription and\n # unsubscription.\n if self._sids_by_group[group]:\n await asyncio.wait([asyncio.create_task(self.receive_json({\"type\": \"stop\", \"id\": sid})) for sid in self._sids_by_group[group]])\n\n # ---------------------------------------------------------- GRAPHQL PROTOCOL EVENTS\n\n async def _on_gql_connection_init(self, payload):\n \"\"\"Process the CONNECTION_INIT message.\n\n Start sending keepalive messages if `send_keepalive_every` set.\n Respond with either CONNECTION_ACK or CONNECTION_ERROR message.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. See the `receive_json` handler.\n \"\"\"\n try:\n # Notify subclass a new client is connected.\n await self.on_connect(payload)\n except Exception as ex: # pylint: disable=broad-except\n await self._send_gql_connection_error(ex)\n # Close the connection. NOTE: We use the 4000 code because\n # there are two reasons: A) We can not use codes greater\n # than 1000 and less than 3000 because Daphne and Autobahn\n # do not allow this (see `sendClose` from\n # `autobahn/websocket/protocol.py` and\n # `daphne/ws_protocol.py`). B)\n # https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent\n # Mozilla offers codes 4000–4999 available for all apps.\n await self.close(code=4000)\n else:\n # Send CONNECTION_ACK message.\n await self._send_gql_connection_ack()\n # If keepalive enabled then send one message immediately and\n # schedule periodic messages.\n if self.send_keepalive_every is not None:\n send_keepalive_every = self.send_keepalive_every\n\n async def keepalive_sender():\n \"\"\"Send keepalive messages periodically.\"\"\"\n while True:\n await asyncio.sleep(send_keepalive_every)\n await self._send_gql_connection_keep_alive()\n\n self._keepalive_task = asyncio.create_task(keepalive_sender())\n # Immediately send keepalive message cause it is\n # required by the protocol description.\n await self._send_gql_connection_keep_alive()\n\n async def _on_gql_connection_terminate(self):\n \"\"\"Process the CONNECTION_TERMINATE message.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. See the `receive_json` handler.\n \"\"\"\n\n # Close the connection.\n await self.close(code=1000)\n\n async def _on_gql_start(self, op_id, payload):\n \"\"\"Process the START message.\n\n Handle the message with query, mutation or subscription request.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. 
See the `receive_json` handler.\n \"\"\"\n try:\n if op_id in self._subscriptions:\n message = f\"Subscription with msg_id={op_id} already exists!\"\n raise graphql.error.GraphQLError(message)\n\n # Get the message data.\n query = payload[\"query\"]\n op_name = payload.get(\"operationName\")\n variables = payload.get(\"variables\", {})\n\n # Prepare a context object.\n context = DictAsObject({})\n context.channels_scope = self.scope\n context.channel_name = self.channel_name\n context.graphql_operation_name = op_name\n context.graphql_operation_id = op_id\n\n # Process the request with Graphene and GraphQL-core.\n doc_ast, op_ast, errors = await self._on_gql_start__parse_query(op_name, query)\n if errors:\n await self._send_gql_data(op_id, None, errors)\n await self._send_gql_complete(op_id)\n return\n # Assert values are not None to suppress MyPy complains.\n assert doc_ast is not None\n assert op_ast is not None\n\n # If the operation is subscription.\n if op_ast.operation == graphql.language.ast.OperationType.SUBSCRIPTION:\n LOG.debug(\n \"Subscription request. Operation ID: %s, operation name: %s.)\",\n op_id,\n op_name,\n )\n\n # This returns asynchronous generator or ExecutionResult\n # instance in case of error.\n subscr_result = await self._on_gql_start__subscribe(\n doc_ast,\n operation_name=op_name,\n root_value=None,\n variable_values=variables,\n context_value=context,\n subscribe_field_resolver=functools.partial(\n self._on_gql_start__initialize_subscription_stream,\n op_id,\n op_name,\n ),\n middleware=self._middleware,\n )\n\n # When subscr_result is an AsyncGenerator, consume\n # stream of notifications and send them to clients.\n if not isinstance(subscr_result, graphql.ExecutionResult):\n stream = cast(AsyncIterator[graphql.ExecutionResult], subscr_result)\n # Send subscription activation message (if enabled)\n # NOTE: We do it before reading the the stream\n # stream to guarantee that no notifications are sent\n # before the subscription confirmation message.\n if self.confirm_subscriptions:\n await self._send_gql_data(\n op_id,\n data=self.subscription_confirmation_message[\"data\"],\n errors=self.subscription_confirmation_message[\"errors\"],\n )\n\n consumer_init_done = asyncio.Event()\n\n async def consume_stream():\n consumer_init_done.set()\n try:\n async for item in stream:\n # Skipped subscription event may have no\n # data and no errors. Send message only\n # when we have something to send.\n if item.data or item.errors:\n try:\n await self._send_gql_data(op_id, item.data, item.errors)\n except asyncio.CancelledError:\n break\n except Exception as ex: # pylint: disable=broad-except\n LOG.debug(\n \"Exception in the subscription GraphQL resolver!\" \"Operation %s(%s).\",\n op_name,\n op_id,\n exc_info=ex,\n )\n await self._send_gql_data(op_id, None, [ex])\n\n # We need to end this task when client drops\n # connection or unsubscribes, so lets store it.\n self._notifier_tasks[op_id] = asyncio.create_task(consume_stream())\n\n # We must be sure here that the subscription\n # initialization is finished and the stream consumer\n # is active before we exit this function. Because in\n # the outer scope we have locking mechanism of start\n # and stop operations. 
And we want to say\n # \"subscription operation is started\" only when it\n # actually is.\n # This allows us to avoid the race condition between\n # simultaneous subscribe and unsubscribe calls.\n await consumer_init_done.wait()\n return\n\n # Else (when gql_subscribe returns ExecutionResult\n # containing error) fallback to standard handling below.\n operation_result = cast(graphql.ExecutionResult, subscr_result)\n\n # If the operation is query or mutation.\n else:\n LOG.debug(\"New query/mutation. Operation %s(%s).\", op_name, op_id)\n\n if self.warn_operation_timeout is not None:\n start_time = time.perf_counter()\n\n # Standard name for \"IntrospectionQuery\". We might also\n # check that\n # `doc_ast.definitions[0].selection_set.selections[0].name.value`\n # equals to `__schema`. This is a more robust way. But\n # it will eat up more CPU pre each query. For now lets\n # check only a query name.\n middleware_manager = self._middleware\n if op_name == \"IntrospectionQuery\":\n # No need to call middlewares for the\n # IntrospectionQuery. There no real resolvers. Only\n # the type information.\n middleware_manager = None\n exec_result = graphql.execution.execute(\n self.schema.graphql_schema,\n document=doc_ast,\n root_value=None,\n operation_name=op_name,\n variable_values=variables,\n context_value=context,\n middleware=middleware_manager,\n )\n if inspect.isawaitable(exec_result):\n exec_result = await exec_result\n operation_result = cast(graphql.ExecutionResult, exec_result)\n\n if self.warn_operation_timeout is not None:\n duration = time.perf_counter() - start_time\n if duration >= self.warn_operation_timeout:\n LOG.warning(\n \"Operation %s(%s) took %.6f seconds. Debug\" \" log contains full operation details.\",\n op_name,\n op_id,\n duration,\n )\n LOG.debug(\n \"Operation %s(%s) took %.6f seconds. Query:\" \" %r, variables: %r.\",\n op_name,\n op_id,\n duration,\n query,\n variables,\n )\n # Respond to a query or mutation immediately.\n await self._send_gql_data(op_id, operation_result.data, operation_result.errors)\n await self._send_gql_complete(op_id)\n\n except Exception as ex: # pylint: disable=broad-except\n if isinstance(ex, graphql.error.GraphQLError):\n # Respond with details of GraphQL execution error.\n LOG.warning(\"GraphQL error! 
Operation %s(%s).\", op_name, op_id, exc_info=True)\n await self._send_gql_data(op_id, None, [ex])\n await self._send_gql_complete(op_id)\n else:\n # Respond with general error responce.\n await self._send_gql_error(op_id, ex)\n\n async def _on_gql_start__parse_query(\n self, op_name: str, query: str\n ) -> Tuple[Optional[graphql.DocumentNode], Optional[graphql.OperationDefinitionNode], Optional[Iterable[graphql.GraphQLError]],]:\n \"\"\"Parse and validate GraphQL query.\n\n It is highly likely that the same operation will be parsed many\n times, so this function is wrapped with LRU cache.\n\n This async function offloads the GraphQL processing to the\n worker thread cause according to our experiments even GraphQL\n document parsing and validation take a while and depends approx.\n linearly on the size of the selection set.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n\n Returns:\n Tuple with three optional fields:\n 0: AST of parsed GraphQL document.\n 1: GraphQL operation definition.\n 2: Sequence of errors.\n \"\"\"\n\n res = await channels.db.database_sync_to_async(self._on_gql_start__parse_query_sync_cached, thread_sensitive=False)(op_name, query)\n\n doc_ast: Optional[graphql.DocumentNode] = res[0]\n op_ast: Optional[graphql.OperationDefinitionNode] = res[1]\n errors: Optional[Iterable[graphql.GraphQLError]] = res[2]\n\n return (doc_ast, op_ast, errors)\n\n @functools.lru_cache(maxsize=128)\n def _on_gql_start__parse_query_sync_cached(\n self, op_name: str, query: str\n ) -> Tuple[Optional[graphql.DocumentNode], Optional[graphql.OperationDefinitionNode], Optional[Iterable[graphql.GraphQLError]],]:\n \"\"\"Parse and validate GraphQL query. Cached sync implementation.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n \"\"\"\n\n # Parsing.\n try:\n doc_ast = graphql.parse(query)\n except graphql.GraphQLError as ex:\n return None, None, [ex]\n\n # Validation.\n validation_errors: List[graphql.GraphQLError] = graphql.validate(self.schema.graphql_schema, doc_ast)\n if validation_errors:\n return None, None, validation_errors\n\n op_ast = graphql.utilities.get_operation_ast(doc_ast, op_name)\n\n return doc_ast, op_ast, None\n\n async def _on_gql_start__subscribe(\n self,\n document: graphql.DocumentNode,\n root_value: Any = None,\n context_value: Any = None,\n variable_values: Optional[Dict[str, Any]] = None,\n operation_name: Optional[str] = None,\n field_resolver: Optional[graphql.GraphQLFieldResolver] = None,\n subscribe_field_resolver: Optional[graphql.GraphQLFieldResolver] = None,\n middleware: graphql.Middleware = None,\n execution_context_class: Optional[Type[graphql.ExecutionContext]] = None,\n ) -> Union[AsyncIterator[graphql.ExecutionResult], graphql.ExecutionResult]:\n \"\"\"Create a GraphQL subscription.\n\n This is a copy of `graphql.execution.subscribe.subscribe` from\n the GraphQL-core library v3.2.3 improved to support middlewares\n and user defined execution_context_class.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n \"\"\"\n\n result_or_stream = await graphql.create_source_event_stream(\n self.schema.graphql_schema,\n document,\n root_value,\n context_value,\n variable_values,\n operation_name,\n subscribe_field_resolver,\n )\n if isinstance(result_or_stream, graphql.ExecutionResult):\n return result_or_stream\n\n async def 
map_source_to_response(payload: Any) -> graphql.ExecutionResult:\n \"\"\"Map source to response.\n\n For each payload yielded from a subscription, map it over\n the normal GraphQL :func:`~graphql.execute` function, with\n `payload` as the `root_value`. This implements the\n \"MapSourceToResponseEvent\" algorithm described in the\n GraphQL specification. The :func:`~graphql.execute` function\n provides the \"ExecuteSubscriptionEvent\" algorithm, as it is\n nearly identical to the \"ExecuteQuery\" algorithm, for which\n :func:`~graphql.execute` is also used.\n \"\"\"\n result = graphql.execute(\n self.schema.graphql_schema,\n document,\n payload,\n context_value,\n variable_values,\n operation_name,\n field_resolver,\n middleware=middleware,\n execution_context_class=execution_context_class,\n ) # type: ignore\n result = await result if inspect.isawaitable(result) else result\n result = cast(graphql.ExecutionResult, result)\n # Skip notification if subscription returned `None`.\n if not result.errors and result.data:\n for key in list(result.data.keys()):\n if result.data[key] is None:\n result.data.pop(key)\n return result\n\n # Map every source value to a ExecutionResult value.\n return graphql.MapAsyncIterator(result_or_stream, map_source_to_response)\n\n async def _on_gql_start__initialize_subscription_stream(\n self,\n operation_id: int,\n operation_name: str,\n root: Any,\n info: graphql.GraphQLResolveInfo,\n *args,\n **kwds,\n ):\n \"\"\"Create asynchronous generator with subscription events.\n\n Called inside `_on_gql_start__subscribe` function by\n graphql-core as `subscribe_field_resolver` argument.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n \"\"\"\n # Graphene stores original subscription class in `graphene_type`\n # field of `return_type` object. Since subscriptions are build\n # on top of `graphene` we always have graphene specific\n # `return_type` class.\n return_type = info.return_type\n while graphql.is_wrapping_type(return_type):\n return_type = return_type.of_type # type: ignore[union-attr]\n subscription_class = return_type.graphene_type # type: ignore[union-attr]\n\n # It is ok to access private fields of `Subscription`\n # implementation. `Subscription` class used to create\n # subscriptions as graphene object but actually it is a part of\n # consumer implementation.\n # pylint: disable=protected-access\n\n # Attach current subscription to the group corresponding to\n # the concrete class. This allows to trigger all the\n # subscriptions of the current type, by invoking `publish`\n # without setting the `group` argument.\n groups = [subscription_class._group_name()]\n\n # Invoke the subclass-specified `subscribe` method to get\n # the groups subscription must be attached to.\n if subscription_class._meta.subscribe is not None:\n subclass_groups = subscription_class._meta.subscribe(root, info, *args, **kwds)\n # Properly handle `async def subscribe`.\n if asyncio.iscoroutinefunction(subscription_class._meta.subscribe):\n subclass_groups = await subclass_groups\n assert subclass_groups is None or isinstance(subclass_groups, (list, tuple)), (\n f\"Method 'subscribe' returned a value of an incorrect type\" f\" {type(subclass_groups)}! A list, a tuple, or 'None' expected.\"\n )\n subclass_groups = subclass_groups or []\n else:\n subclass_groups = []\n\n groups += [subscription_class._group_name(group) for group in subclass_groups]\n\n # The subscription notification queue. 
Required to preserve the\n # order of notifications within a single subscription.\n queue_size = subscription_class.notification_queue_limit\n if queue_size is None or queue_size <= 0:\n # Take default limit from the Consumer class.\n queue_size = self.subscription_notification_queue_limit\n # The subscription notification queue.\n # NOTE: The asyncio.Queue class is not thread-safe. So use the\n # `notification_queue_lock` as a guard while reading or writing\n # to the queue.\n notification_queue: asyncio.Queue = asyncio.Queue(maxsize=queue_size)\n # Lock to ensure that `notification_queue` operations are\n # thread safe.\n notification_queue_lock = threading.RLock()\n\n unsubscribed = subscription_class._meta.unsubscribed\n\n async def unsubscribed_callback():\n \"\"\"Call `unsubscribed` notification.\n\n The `cls._meta.unsubscribed` might do blocking operations,\n so offload it to the thread.\n \"\"\"\n\n if unsubscribed is None:\n return None\n result = unsubscribed(None, info, *args, **kwds)\n # Properly handle `async def unsubscribed`.\n if inspect.isawaitable(result):\n result = await result\n\n def enqueue_notification(payload):\n \"\"\"Put notification to the queue.\n\n Called by the WebSocket consumer (instance of the\n GraphqlWsConsumer subclass) when it receives the broadcast\n message (from the Channels group) sent by the\n Subscription.broadcast.\n\n Args:\n sid: Operation id of the subscription.\n \"\"\"\n while True:\n with notification_queue_lock:\n try:\n notification_queue.put_nowait(payload)\n break # The item was enqueued. Exit the loop.\n except asyncio.QueueFull:\n # The queue is full - issue a warning and throw\n # away the oldest item from the queue.\n # NOTE: Queue with the size 1 means that it is\n # safe to drop intermediate notifications.\n if notification_queue.maxsize != 1:\n LOG.warning(\n \"Subscription notification dropped! Operation %s(%s).\",\n operation_name,\n operation_id,\n )\n notification_queue.get_nowait()\n notification_queue.task_done()\n\n # Try to put the incoming item to the queue\n # within the same lock. This is an speed\n # optimization.\n try:\n notification_queue.put_nowait(payload)\n # The item was enqueued. 
Exit the loop.\n break\n except asyncio.QueueFull:\n # Kind'a impossible to get here, but if we\n # do, then we should retry until the queue\n # have capacity to process item.\n pass\n\n waitlist = []\n for group in groups:\n self._sids_by_group.setdefault(group, []).append(operation_id)\n waitlist.append(asyncio.create_task(self._channel_layer.group_add(group, self.channel_name)))\n self._subscriptions[operation_id] = self._SubInf(\n groups=groups,\n sid=operation_id,\n unsubscribed_callback=unsubscribed_callback,\n enqueue_notification=enqueue_notification,\n )\n if waitlist:\n await asyncio.wait(waitlist)\n\n _deserialize = channels.db.database_sync_to_async(Serializer.deserialize, thread_sensitive=False)\n\n # For each notification (event) yielded from this function the\n # `_on_gql_start__subscribe` function will call subscription\n # resolver (`publish`) via `graphql.execute` method.\n while True:\n with notification_queue_lock:\n payload = await notification_queue.get()\n data = await _deserialize(payload)\n yield data\n with notification_queue_lock:\n notification_queue.task_done()\n\n async def _on_gql_stop(self, op_id):\n \"\"\"Process the STOP message.\n\n Handle an unsubscribe request.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. See the `receive_json` handler.\n \"\"\"\n LOG.debug(\"Stop handling or unsubscribe operation %s.\", op_id)\n\n # Currently only subscriptions can be stopped. But we see but\n # some clients (e.g. GraphiQL) send the stop message even for\n # queries and mutations. We also see that the Apollo server\n # ignores such messages, so we ignore them as well.\n if op_id not in self._subscriptions:\n return\n\n waitlist: List[asyncio.Task] = []\n\n # Remove the subscription from the registry.\n subinf = self._subscriptions.pop(op_id)\n\n # Cancel the task which watches the notification queue.\n consumer_task = self._notifier_tasks.pop(op_id, None)\n if consumer_task:\n consumer_task.cancel()\n waitlist.append(consumer_task)\n\n # Stop listening for corresponding groups.\n for group in subinf.groups:\n # Remove the subscription from groups it belongs to. 
Remove\n # the group itself from the `_sids_by_group` if there are no\n # subscriptions left in it.\n assert self._sids_by_group[group].count(op_id) == 1, (\n f\"Registry is inconsistent: group '{group}' has \" f\"{self._sids_by_group[group].count(op_id)} \" \"occurrences of op_id={op_id}!\"\n )\n self._sids_by_group[group].remove(op_id)\n if not self._sids_by_group[group]:\n del self._sids_by_group[group]\n waitlist.append(asyncio.create_task(self._channel_layer.group_discard(group, self.channel_name)))\n\n if waitlist:\n await asyncio.wait(waitlist)\n\n await subinf.unsubscribed_callback()\n\n # Send the unsubscription confirmation message.\n await self._send_gql_complete(op_id)\n\n # -------------------------------------------------------- GRAPHQL PROTOCOL MESSAGES\n\n async def _send_gql_connection_ack(self):\n \"\"\"Sent in reply to the `connection_init` request.\"\"\"\n await self.send_json({\"type\": \"connection_ack\"})\n\n async def _send_gql_connection_error(self, error: Exception):\n \"\"\"Connection error sent in reply to the `connection_init`.\"\"\"\n LOG.warning(\"GraphQL connection error: %s!\", error, exc_info=error)\n await self.send_json({\"type\": \"connection_error\", \"payload\": self._format_error(error)})\n\n async def _send_gql_data(self, op_id, data: Optional[dict], errors: Optional[Iterable[Exception]]):\n \"\"\"Send GraphQL `data` message to the client.\n\n Args:\n data: Dict with GraphQL query response.\n errors: List of exceptions occurred during processing the\n GraphQL query. (Errors happened in resolvers.)\n \"\"\"\n # Log errors with tracebacks so we can understand what happened\n # in a failed resolver.\n for ex in errors or []:\n # Typical exception here is `GraphQLLocatedError` which has\n # reference to the original error raised from a resolver.\n tb = ex.__traceback__\n LOG.warning(\n \"GraphQL resolver failed! Operation id: %s:\\n%s\",\n op_id,\n \"\".join(traceback.format_exception(type(ex), ex, tb)).strip(),\n )\n\n await self.send_json(\n {\n \"type\": \"data\",\n \"id\": op_id,\n \"payload\": {\n \"data\": data,\n **({\"errors\": [self._format_error(e) for e in errors]} if errors else {}), # type: ignore\n },\n }\n )\n\n async def _send_gql_error(self, op_id, error: Exception):\n \"\"\"Tell client there is a query processing error.\n\n Server sends this message upon a failing operation.\n It can be an unexpected or unexplained GraphQL execution error\n or a bug in the code. 
It is unlikely that this is GraphQL\n validation errors (such errors are part of data message and\n must be sent by the `_send_gql_data` method).\n\n Args:\n op_id: Id of the operation that failed on the server.\n error: String with the information about the error.\n\n \"\"\"\n LOG.warning(\"Operation %s processing error: %s!\", op_id, error, exc_info=error)\n formatted_error = self._format_error(error)\n await self.send_json(\n {\n \"type\": \"error\",\n \"id\": op_id,\n \"payload\": {\"errors\": [formatted_error]},\n }\n )\n\n async def _send_gql_complete(self, op_id):\n \"\"\"Send GraphQL `complete` message to the client.\n\n Args:\n op_id: Id of the corresponding operation.\n\n \"\"\"\n await self.send_json({\"type\": \"complete\", \"id\": op_id})\n\n async def _send_gql_connection_keep_alive(self):\n \"\"\"Send the keepalive (ping) message.\"\"\"\n await self.send_json({\"type\": \"ka\"})\n\n # ---------------------------------------------------------------------- AUXILIARIES\n\n @staticmethod\n def _format_error(error: Exception) -> graphql.GraphQLFormattedError:\n \"\"\"Format given exception `error` to send over a network.\n\n This function will add the \"extensions.code\" field containing an\n exception class name. A frontend may use this value to handle\n errors properly.\n\n If your backend throws an Exception, then an error will be formatted\n for a client like this:\n {\n \"id\": \"NNN\",\n \"type\": \"data\",\n \"payload\": {\n \"data\": {...},\n \"errors\": [{\n \"message\": \"Test error\",\n \"locations\": [{\"line\": NNN, \"column\": NNN}],\n \"path\": [\"somepath\"],\n \"extensions\": {\"code\": \"Exception\"}\n }]\n }\n }\n\n If you define custom exception class (`class\n CustomErr(Exception)`), then the error code in the \"extensions\"\n field will equals to the \"CustomErr\":\n \"extensions\": {\"code\": \"Exception\"}\n\n There is a special case of errors on connection. They behave\n using same logic: in the \"code\" field there will be an\n exception class name:\n {\n \"payload\": {\n \"message\": \"message from a exception\",\n \"extensions\": {\"code\": \"UserUnauthenticatedError\"}\n },\n \"type\": \"connection_error\"\n }\n\n NOTE: If you need to add more fields to the error, then override\n this function in a subclass. Another way to enrich errors is to\n use a GraphQLError based classes for your exceptions.\n \"\"\"\n if isinstance(error, graphql.error.GraphQLError):\n if error.extensions and \"code\" not in error.extensions:\n if error.original_error:\n error.extensions[\"code\"] = type(error.original_error).__name__\n return error.formatted\n\n # Usually the GraphQL-core library wraps any exception with\n # GraphQLError. 
So this code should be unreachable, unless there\n # are some bugs in the library.\n return {\n \"message\": f\"{type(error).__name__}: {str(error)}\",\n \"extensions\": {\"code\": type(error).__name__},\n }\n\n def _spawn_background_task(self, awaitable):\n \"\"\"Spawn background task.\n\n Tasks are canceled and awaited when a client disconnects.\n Args:\n awaitable: An awaitable to run in a task.\n Returns:\n A started `asyncio.Task` instance.\n\n \"\"\"\n background_task = asyncio.create_task(awaitable)\n self._background_tasks.add(background_task)\n return background_task\n\n @property\n def _channel_layer(self):\n \"\"\"Channel layer.\"\"\"\n # We cannot simply check existence of channel layer in the\n # consumer constructor, so we added this property.\n assert self.channel_layer is not None, \"Channel layer is not configured!\"\n return self.channel_layer" }, { "identifier": "Serializer", "path": "helpers/channels_graphql_ws/serializer.py", "snippet": "class Serializer:\n \"\"\"Serialize/deserialize Python collection with Django models.\n\n Serialize/deserialize the data with the MessagePack like Redis\n Channels layer backend does.\n\n If `data` contains Django models, then it is serialized by the\n Django serialization utilities. For details see:\n Django serialization:\n https://docs.djangoproject.com/en/dev/topics/serialization/\n MessagePack:\n https://github.com/msgpack/msgpack-python\n \"\"\"\n\n @staticmethod\n def serialize(data):\n \"\"\"Serialize the `data`.\"\"\"\n\n def encode_extra_types(obj):\n \"\"\"MessagePack hook to serialize extra types.\n\n The recipe took from the MessagePack for Python docs:\n https://github.com/msgpack/msgpack-python#packingunpacking-of-custom-data-type\n\n Supported types:\n - Django models (through `django.core.serializers`).\n - Python `datetime` types:\n - `datetime.datetime`\n - `datetime.date`\n - `datetime.time`\n\n \"\"\"\n if isinstance(obj, django.db.models.Model):\n return {\n \"__djangomodel__\": True,\n \"as_str\": django.core.serializers.serialize(\"json\", [obj]),\n }\n if isinstance(obj, datetime.datetime):\n return {\"__datetime__\": True, \"as_str\": obj.isoformat()}\n if isinstance(obj, datetime.date):\n return {\"__date__\": True, \"as_str\": obj.isoformat()}\n if isinstance(obj, datetime.time):\n return {\"__time__\": True, \"as_str\": obj.isoformat()}\n return obj\n\n return msgpack.packb(data, default=encode_extra_types, use_bin_type=True)\n\n @staticmethod\n def deserialize(data):\n \"\"\"Deserialize the `data`.\"\"\"\n\n def decode_extra_types(obj):\n \"\"\"MessagePack hook to deserialize extra types.\"\"\"\n if \"__djangomodel__\" in obj:\n obj = next(django.core.serializers.deserialize(\"json\", obj[\"as_str\"])).object\n elif \"__datetime__\" in obj:\n obj = datetime.datetime.fromisoformat(obj[\"as_str\"])\n elif \"__date__\" in obj:\n obj = datetime.date.fromisoformat(obj[\"as_str\"])\n elif \"__time__\" in obj:\n obj = datetime.time.fromisoformat(obj[\"as_str\"])\n return obj\n\n return msgpack.unpackb(data, object_hook=decode_extra_types, raw=False)" } ]
import asyncio
import collections
import hashlib
import logging
import asgiref.sync
import channels.db
import channels.layers
import graphene
import graphene.types.objecttype
import graphene.types.utils
import graphene.utils.get_unbound_function
import graphene.utils.props
from typing import Optional

from .graphql_ws_consumer import GraphqlWsConsumer
from .serializer import Serializer
12,692
Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Required. Args: payload: The `payload` from the `broadcast` invocation. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The same that any Graphene resolver returns. [async] subscribe(root, info, *args, **kwds): Called when client subscribes. Define this to do some extra work when client subscribes and to group subscriptions into different subscription groups. Method signature is the same as in other GraphQL "resolver" methods but it may return the subscription groups names to put the subscription into. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Optional. Args: root: Root resolver object. Typically `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The list or tuple of subscription group names this subscription instance belongs to. Later the subscription will trigger on publishes to any of that groups. If method returns None (default behavior) then the subscription is only put to the default group (the one which corresponds to the `Subscription` subclass). [async] unsubscribed(root, info, *args, **kwds): Called when client unsubscribes. Define this to be notified when client unsubscribes. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Args: root: Always `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. The methods enlisted above receives "standard" set of GraphQL resolver arguments. The `info` field has `context` which can be used to transmit some useful payload between these methods. For example if `subscribe` sets `info.context.zen=42` then `publish` will have access to this value as `info.context.zen`. Static methods of subscription subclass: broadcast(): Call this to notify all subscriptions in the group. unsubscribe(): Call this to stop all subscriptions in the group. NOTE: If you call any of these methods from the asynchronous context then `await` the result of the call. """ # ----------------------------------------------------------------------- PUBLIC API # Subscription notifications queue limit. Set this to control the # amount of notifications server keeps in the queue when # notifications come faster than server processes them. Setting this # to 1 drops all notifications in the queue except the latest one. # Useful to skip intermediate notifications, e.g. progress reports. notification_queue_limit: Optional[int] = None @classmethod def broadcast(cls, *, group=None, payload=None): """Call this method to notify all subscriptions in the group. 
Can be called from both synchronous and asynchronous contexts. It is necessary to `await` if called from the async context. Args: group: Name of the subscription group which members must be notified. `None` means that all the subscriptions of type will be triggered. payload: The payload delivered to the `publish` handler. NOTE: The `payload` is serialized before sending to the subscription group. """ try: event_loop = asyncio.get_event_loop() except RuntimeError: pass else: if event_loop.is_running(): return event_loop.create_task(cls.broadcast_async(group=group, payload=payload)) return cls.broadcast_sync(group=group, payload=payload) @classmethod async def broadcast_async(cls, *, group=None, payload=None): """Broadcast, asynchronous version.""" # Manually serialize the `payload` to allow transfer of Django # models inside `payload`, auto serialization does not do this.
# Copyright (C) DATADVANCE, 2010-2023 # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Graphene-like subscription class. The `Subscription` class itself is a "creative" copy of `Mutation` class from the Graphene (`graphene/types/mutation.py`). """ # Module logger. LOG = logging.getLogger(__name__) class Subscription(graphene.ObjectType): """Subscription type definition. Subclass this the Subscription class to define a GraphQL subscription. The class works with the `GraphqlWsConsumer` which maintains a WebSocket connection with the client. The subclass specifies the following methods. You can define each of them as a `@classmethod`, as a `@staticmethod`, or even as a regular method (like Graphene typically does). It shall work fine either way. NOTE, if you define the method as a regular method (not a classmethod or a staticmethod) you will receive the first argument (`payload`/`root`) into the `self` argument. [async] publish(payload, info, *args, **kwds): This method invoked each time subscription "triggers". Raising an exception here will lead to sending the notification with the error. Technically the WebSocket message will contain extra field "extensions.code" holding the classname of the exception raised. To suppress the notification return `None`. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Required. Args: payload: The `payload` from the `broadcast` invocation. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The same that any Graphene resolver returns. [async] subscribe(root, info, *args, **kwds): Called when client subscribes. Define this to do some extra work when client subscribes and to group subscriptions into different subscription groups. Method signature is the same as in other GraphQL "resolver" methods but it may return the subscription groups names to put the subscription into. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. 
You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Optional. Args: root: Root resolver object. Typically `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The list or tuple of subscription group names this subscription instance belongs to. Later the subscription will trigger on publishes to any of that groups. If method returns None (default behavior) then the subscription is only put to the default group (the one which corresponds to the `Subscription` subclass). [async] unsubscribed(root, info, *args, **kwds): Called when client unsubscribes. Define this to be notified when client unsubscribes. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Args: root: Always `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. The methods enlisted above receives "standard" set of GraphQL resolver arguments. The `info` field has `context` which can be used to transmit some useful payload between these methods. For example if `subscribe` sets `info.context.zen=42` then `publish` will have access to this value as `info.context.zen`. Static methods of subscription subclass: broadcast(): Call this to notify all subscriptions in the group. unsubscribe(): Call this to stop all subscriptions in the group. NOTE: If you call any of these methods from the asynchronous context then `await` the result of the call. """ # ----------------------------------------------------------------------- PUBLIC API # Subscription notifications queue limit. Set this to control the # amount of notifications server keeps in the queue when # notifications come faster than server processes them. Setting this # to 1 drops all notifications in the queue except the latest one. # Useful to skip intermediate notifications, e.g. progress reports. notification_queue_limit: Optional[int] = None @classmethod def broadcast(cls, *, group=None, payload=None): """Call this method to notify all subscriptions in the group. Can be called from both synchronous and asynchronous contexts. It is necessary to `await` if called from the async context. Args: group: Name of the subscription group which members must be notified. `None` means that all the subscriptions of type will be triggered. payload: The payload delivered to the `publish` handler. NOTE: The `payload` is serialized before sending to the subscription group. """ try: event_loop = asyncio.get_event_loop() except RuntimeError: pass else: if event_loop.is_running(): return event_loop.create_task(cls.broadcast_async(group=group, payload=payload)) return cls.broadcast_sync(group=group, payload=payload) @classmethod async def broadcast_async(cls, *, group=None, payload=None): """Broadcast, asynchronous version.""" # Manually serialize the `payload` to allow transfer of Django # models inside `payload`, auto serialization does not do this.
serialized_payload = await channels.db.database_sync_to_async(Serializer.serialize, thread_sensitive=False)(payload)
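The next_line above completes `broadcast_async` by offloading `Serializer.serialize` to a worker thread before the payload is sent to the Channels group. A standalone sketch of the round trip it relies on follows; it assumes msgpack and Django are installed and uses a plain dict with a `datetime` so no Django model or settings are needed.

```python
# Round-trip sketch for the Serializer shown in the context above.
# MessagePack packs the dict; the __datetime__ hook restores the datetime.
import datetime

from helpers.channels_graphql_ws.serializer import Serializer

payload = {
    "event": "user_updated",
    "when": datetime.datetime(2023, 12, 25, 11, 40, 56),
}

packed = Serializer.serialize(payload)      # bytes (MessagePack)
restored = Serializer.deserialize(packed)   # dict with the datetime restored

assert restored["when"] == payload["when"]
```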
1
2023-12-25 11:40:56+00:00
16k
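The row above (cropped_code/all_code) documents the `Subscription` base class: `subscribe` returns group names, `publish` maps a broadcast payload to the response, `unsubscribed` is a teardown hook, and `broadcast()` notifies a group. The sketch below is a hedged example of a concrete subscription built on those hooks; `OnNewChatMessage`, the `"chatroom"`-style group names, and the module path are assumptions, not taken from the row.

```python
# Sketch of a concrete subscription on top of the Subscription base class
# described above. Names and the import path are illustrative.
import graphene

from helpers.channels_graphql_ws.subscription import Subscription  # assumed module path


class OnNewChatMessage(Subscription):
    """Notify clients about new messages in a chat room."""

    # Keep only the latest pending notification per subscriber (see docstring above).
    notification_queue_limit = 1

    room = graphene.String()
    text = graphene.String()

    class Arguments:
        room = graphene.String(required=True)

    @staticmethod
    def subscribe(root, info, room):
        # Put this subscription into a per-room group.
        return [room]

    @staticmethod
    def publish(payload, info, room):
        # Returning None here would suppress the notification.
        return OnNewChatMessage(room=room, text=payload["text"])


# Somewhere in a mutation or signal handler (await the result in async code):
# OnNewChatMessage.broadcast(group="general", payload={"text": "hello"})
```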
facebookresearch/ca_body
ca_body/models/mesh_vae_drivable.py
[ { "identifier": "ConvBlock", "path": "ca_body/nn/blocks.py", "snippet": "class ConvBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n size,\n lrelu_slope=0.2,\n kernel_size=3,\n padding=1,\n wnorm_dim=0,\n ):\n super().__init__()\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n\n # TODO: do we really need this?\n self.conv_resize = Conv2dWN(in_channels, out_channels, kernel_size=1)\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_skip = self.conv_resize(x)\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n return x + x_skip" }, { "identifier": "ConvDownBlock", "path": "ca_body/nn/blocks.py", "snippet": "class ConvDownBlock(nn.Module):\n def __init__(self, in_channels, out_channels, size, lrelu_slope=0.2, groups=1, wnorm_dim=0):\n \"\"\"Constructor.\n\n Args:\n in_channels: int, # of input channels\n out_channels: int, # of input channels\n size: the *input* size\n \"\"\"\n super().__init__()\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n\n self.conv_resize = Conv2dWN(\n in_channels, out_channels, kernel_size=1, stride=2, groups=groups\n )\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=3,\n height=size,\n width=size,\n groups=groups,\n padding=1,\n )\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=3,\n stride=2,\n height=size // 2,\n width=size // 2,\n groups=groups,\n padding=1,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_skip = self.conv_resize(x)\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n return x + x_skip" }, { "identifier": "UpConvBlockDeep", "path": "ca_body/nn/blocks.py", "snippet": "class UpConvBlockDeep(nn.Module):\n def __init__(self, in_channels, out_channels, size, lrelu_slope=0.2, wnorm_dim=0, groups=1):\n super().__init__()\n self.upsample = nn.UpsamplingBilinear2d(size)\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n # NOTE: the old one normalizes only across one dimension\n\n self.conv_resize = Conv2dWN(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n groups=groups,\n )\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=3,\n height=size,\n width=size,\n padding=1,\n groups=groups,\n )\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=3,\n height=size,\n width=size,\n padding=1,\n groups=groups,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_up = self.upsample(x)\n x_skip = self.conv_resize(x_up)\n\n x = x_up\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n\n return x + x_skip" }, { "identifier": "tile2d", "path": "ca_body/nn/blocks.py", 
"snippet": "def tile2d(x, size: int):\n \"\"\"Tile a given set of features into a convolutional map.\n\n Args:\n x: float tensor of shape [N, F]\n size: int or a tuple\n\n Returns:\n a feature map [N, F, size[0], size[1]]\n \"\"\"\n # size = size if isinstance(size, tuple) else (size, size)\n # NOTE: expecting only int here (!!!)\n return x[:, :, np.newaxis, np.newaxis].expand(-1, -1, size, size)" }, { "identifier": "weights_initializer", "path": "ca_body/nn/blocks.py", "snippet": "def weights_initializer(lrelu_slope=0.2):\n # pyre-ignore\n def init_fn(m):\n if isinstance(\n m,\n (\n nn.Conv2d,\n nn.Conv1d,\n nn.ConvTranspose2d,\n nn.Linear,\n ),\n ):\n gain = nn.init.calculate_gain(\"leaky_relu\", lrelu_slope)\n nn.init.kaiming_uniform_(m.weight.data, a=gain)\n if hasattr(m, \"bias\") and m.bias is not None:\n nn.init.zeros_(m.bias.data)\n else:\n logger.debug(f\"skipping initialization for {m}\")\n\n return init_fn" }, { "identifier": "LearnableBlur", "path": "ca_body/nn/dof_cal.py", "snippet": "class LearnableBlur(nn.Module):\n # TODO: should we make this conditional?\n def __init__(self, cameras: List[str]) -> None:\n super().__init__()\n self.cameras = cameras\n self.register_parameter(\n \"weights_raw\", nn.Parameter(th.ones(len(cameras), 3, dtype=th.float32))\n )\n\n def name_to_idx(self, cameras: List[str]) -> th.Tensor:\n return th.tensor(\n [self.cameras.index(c) for c in cameras],\n device=self.weights_raw.device,\n dtype=th.long,\n )\n\n # pyre-ignore\n def reg(self, cameras: List[str]):\n # pyre-ignore\n idxs = self.name_to_idx(cameras)\n # pyre-ignore\n return self.weights_raw[idxs]\n\n # pyre-ignore\n def forward(self, img: th.Tensor, cameras: List[str]):\n B = img.shape[0]\n # B, C, H, W\n idxs = self.name_to_idx(cameras)\n # TODO: mask?\n # pyre-ignore\n weights = th.softmax(self.weights_raw[idxs], dim=-1)\n weights = weights.reshape(B, 3, 1, 1, 1)\n return (\n weights[:, 0] * img\n + weights[:, 1] * gaussian_blur(img, [3, 3])\n + weights[:, 2] * gaussian_blur(img, [7, 7])\n )" }, { "identifier": "GeometryModule", "path": "ca_body/utils/geom.py", "snippet": "class GeometryModule(nn.Module):\n def __init__(\n self,\n vi,\n vt,\n vti,\n v2uv,\n uv_size,\n flip_uv=False,\n impaint=False,\n impaint_threshold=100.0,\n ):\n super().__init__()\n\n self.register_buffer(\"vi\", th.as_tensor(vi))\n self.register_buffer(\"vt\", th.as_tensor(vt))\n self.register_buffer(\"vti\", th.as_tensor(vti))\n self.register_buffer(\"v2uv\", th.as_tensor(v2uv, dtype=th.int64))\n\n # TODO: should we just pass topology here?\n self.n_verts = v2uv.shape[0]\n\n self.uv_size = uv_size\n\n # TODO: can't we just index face_index?\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n ).cpu()\n face_index, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n if impaint:\n if uv_size >= 1024:\n logger.info(\n \"impainting index image might take a while for sizes >= 1024\"\n )\n\n index_image, bary_image = index_image_impaint(\n index_image, bary_image, impaint_threshold\n )\n # TODO: we can avoid doing this 2x\n face_index = index_image_impaint(\n face_index, distance_threshold=impaint_threshold\n )\n\n self.register_buffer(\"index_image\", index_image.cpu())\n self.register_buffer(\"bary_image\", bary_image.cpu())\n self.register_buffer(\"face_index_image\", face_index.cpu())\n\n def render_index_images(self, uv_size, flip_uv=False, impaint=False):\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, 
uv_shape=uv_size, flip_uv=flip_uv\n )\n face_image, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n\n if impaint:\n index_image, bary_image = index_image_impaint(\n index_image,\n bary_image,\n )\n\n return index_image, face_image, bary_image\n\n def vn(self, verts):\n return vert_normals(verts, self.vi[np.newaxis].to(th.long))\n\n def to_uv(self, values):\n return values_to_uv(values, self.index_image, self.bary_image)\n\n def from_uv(self, values_uv):\n # TODO: we need to sample this\n return sample_uv(values_uv, self.vt, self.v2uv.to(th.long))" }, { "identifier": "compute_view_cos", "path": "ca_body/utils/geom.py", "snippet": "def compute_view_cos(verts, faces, camera_pos):\n vn = F.normalize(vert_normals(verts, faces), dim=-1)\n v2c = F.normalize(verts - camera_pos[:, np.newaxis], dim=-1)\n return th.einsum(\"bnd,bnd->bn\", vn, v2c)" }, { "identifier": "depth_discontuity_mask", "path": "ca_body/utils/geom.py", "snippet": "def depth_discontuity_mask(\n depth: th.Tensor, threshold: float = 40.0, kscale: float = 4.0, pool_ksize: int = 3\n) -> th.Tensor:\n device = depth.device\n\n with th.no_grad():\n # TODO: pass the kernel?\n kernel = th.as_tensor(\n [\n [[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]],\n [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]],\n ],\n dtype=th.float32,\n device=device,\n )\n\n disc_mask = (th.norm(F.conv2d(depth, kernel, bias=None, padding=1), dim=1) > threshold)[\n :, np.newaxis\n ]\n disc_mask = (\n F.avg_pool2d(disc_mask.float(), pool_ksize, stride=1, padding=pool_ksize // 2) > 0.0\n )\n\n return disc_mask" }, { "identifier": "depth2normals", "path": "ca_body/utils/geom.py", "snippet": "def depth2normals(depth, focal, princpt) -> th.Tensor:\n \"\"\"Convert depth image to normal image using camera intrinsics\n\n Args:\n depth: th.Tensor\n [B, 1, H, W] depth image\n\n focal: th.Tensor\n [B, 2, 2] camera focal lengths\n\n princpt: th.Tensor\n [B, 2] camera principal points\n\n Returns:\n th.Tensor: [B, 3, H, W] normal image\n \"\"\"\n\n return xyz2normals(depth2xyz(depth, focal, princpt))" }, { "identifier": "ShadowUNet", "path": "ca_body/nn/shadow.py", "snippet": "class ShadowUNet(nn.Module):\n def __init__(\n self,\n uv_size,\n ao_mean,\n shadow_size,\n lrelu_slope=0.2,\n beta=1.0,\n n_dims=64,\n interp_mode=\"bilinear\",\n biases=True,\n trainable_mean=False,\n ):\n super().__init__()\n\n # this is the size of the output\n self.uv_size = uv_size\n self.shadow_size = shadow_size\n\n ao_mean = F.interpolate(\n th.as_tensor(ao_mean)[np.newaxis],\n size=(self.shadow_size, self.shadow_size),\n )[0]\n if not trainable_mean:\n # TODO:\n self.register_buffer(\"ao_mean\", ao_mean)\n else:\n self.register_parameter(\"ao_mean\", th.nn.Parameter(ao_mean))\n\n self.depth = 3\n self.lrelu_slope = lrelu_slope\n self.interp_mode = interp_mode\n self.align_corners = None\n if interp_mode == \"bilinear\":\n self.align_corners = False\n\n # the base number of dimensions for the shadow maps\n n_dims = n_dims\n\n # TODO: generate this?\n self.n_enc_dims = [\n (1, n_dims),\n (n_dims, n_dims),\n (n_dims, n_dims),\n (n_dims, n_dims),\n ]\n\n self.sizes = [shadow_size // (2**i) for i in range(len(self.n_enc_dims))]\n\n logger.debug(f\"sizes: {self.sizes}\")\n\n self.enc_layers = nn.ModuleList()\n for i, size in enumerate(self.sizes):\n n_in, n_out = self.n_enc_dims[i]\n logger.debug(f\"EncoderLayers({i}): {n_in}, {n_out}, {size}\")\n self.enc_layers.append(\n nn.Sequential(\n la.Conv2dWNUB(\n n_in,\n n_out,\n kernel_size=3,\n height=size,\n width=size,\n 
stride=1,\n padding=1,\n ),\n nn.LeakyReLU(self.lrelu_slope, inplace=True),\n )\n )\n\n self.n_dec_dims = [\n (n_dims, n_dims),\n (n_dims * 2, n_dims),\n (n_dims * 2, n_dims),\n (n_dims * 2, n_dims),\n ]\n self.dec_layers = nn.ModuleList()\n for i in range(len(self.sizes)):\n size = self.sizes[-i - 1]\n n_in, n_out = self.n_dec_dims[i]\n logger.debug(f\"DecoderLayer({i}): {n_in}, {n_out}, {size}\")\n\n self.dec_layers.append(\n nn.Sequential(\n la.Conv2dWNUB(\n n_in,\n n_out,\n kernel_size=3,\n height=size,\n width=size,\n stride=1,\n padding=1,\n ),\n nn.LeakyReLU(self.lrelu_slope, inplace=True),\n )\n )\n\n self.apply(weights_initializer(self.lrelu_slope))\n\n if biases:\n self.shadow_pred = la.Conv2dWNUB(\n self.n_dec_dims[-1][-1],\n 1,\n kernel_size=3,\n height=self.sizes[0],\n width=self.sizes[0],\n stride=1,\n padding=1,\n )\n else:\n self.shadow_pred = la.Conv2dWN(\n self.n_dec_dims[-1][-1],\n 1,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n\n self.shadow_pred.apply(weights_initializer(1.0))\n self.beta = beta\n\n def forward(self, ao_map):\n # resizing the inputs if necessary\n if ao_map.shape[-2:] != (self.shadow_size, self.shadow_size):\n ao_map = F.interpolate(ao_map, size=(self.shadow_size, self.shadow_size))\n\n x = ao_map - self.ao_mean\n\n enc_acts = []\n # unet enc\n for i, layer in enumerate(self.enc_layers):\n # TODO: try applying a 1D sparse op?\n x = layer(x)\n enc_acts.append(x)\n # TODO: add this layer elsewhere?\n if i < len(self.sizes) - 1:\n x = F.interpolate(\n x,\n scale_factor=0.5,\n mode=\"bilinear\",\n recompute_scale_factor=True,\n align_corners=True,\n )\n\n # we do not need the last one?\n for i, layer in enumerate(self.dec_layers):\n if i > 0:\n x_prev = enc_acts[-i - 1]\n x = F.interpolate(x, size=x_prev.shape[2:4], mode=\"bilinear\", align_corners=True)\n x = th.cat([x, x_prev], dim=1)\n x = layer(x)\n\n shadow_map_lowres = th.sigmoid(self.shadow_pred(x) + self.beta)\n shadow_map = F.interpolate(\n shadow_map_lowres,\n (self.uv_size, self.uv_size),\n mode=self.interp_mode,\n align_corners=self.align_corners,\n )\n\n return {\n \"shadow_map\": shadow_map,\n \"ao_map\": ao_map,\n \"shadow_map_lowres\": shadow_map_lowres,\n }" }, { "identifier": "PoseToShadow", "path": "ca_body/nn/shadow.py", "snippet": "class PoseToShadow(nn.Module):\n def __init__(\n self,\n n_pose_dims,\n uv_size,\n beta=1.0,\n ) -> None:\n super().__init__()\n self.n_pose_dims = n_pose_dims\n self.uv_size = uv_size\n\n self.fc_block = nn.Sequential(\n la.LinearWN(self.n_pose_dims, 256 * 4 * 4),\n nn.LeakyReLU(0.2),\n )\n self.conv_block = nn.Sequential(\n la.ConvTranspose2dWNUB(256, 256, 8, 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(256, 128, 16, 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(128, 128, 32, 32, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(128, 64, 64, 64, 4, 2, 1),\n nn.LeakyReLU(0.2),\n # la.ConvTranspose2dWNUB(64, 64, 128, 128, 4, 2, 1),\n # nn.LeakyReLU(0.2),\n # la.ConvTranspose2dWNUB(64, 1, 256, 256, 4, 2, 1),\n la.ConvTranspose2dWNUB(64, 1, 128, 128, 4, 2, 1),\n )\n self.beta = beta\n self.apply(lambda x: la.glorot(x, 0.2))\n la.glorot(self.conv_block[-1], 1.0)\n\n def forward(self, pose: th.Tensor):\n assert pose.shape\n x = self.fc_block(pose)\n x = self.conv_block(x.reshape(-1, 256, 4, 4))\n shadow_map_lowres = th.sigmoid(x + self.beta)\n\n shadow_map = F.interpolate(\n shadow_map_lowres, size=(self.uv_size, self.uv_size), mode=\"bilinear\"\n )\n return {\"shadow_map\": shadow_map}" }, { "identifier": "UNetWB", "path": 
"ca_body/nn/unet.py", "snippet": "class UNetWB(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n size: int,\n n_init_ftrs: int=8,\n out_scale: float =0.1,\n ):\n # super().__init__(*args, **kwargs)\n super().__init__()\n\n self.out_scale = out_scale\n\n F = n_init_ftrs\n\n self.size = size\n\n self.down1 = nn.Sequential(\n Conv2dWNUB(in_channels, F, self.size // 2, self.size // 2, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down2 = nn.Sequential(\n Conv2dWNUB(F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down3 = nn.Sequential(\n Conv2dWNUB(2 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down4 = nn.Sequential(\n Conv2dWNUB(4 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down5 = nn.Sequential(\n Conv2dWNUB(8 * F, 16 * F, self.size // 32, self.size // 32, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up1 = nn.Sequential(\n ConvTranspose2dWNUB(16 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up2 = nn.Sequential(\n ConvTranspose2dWNUB(8 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up3 = nn.Sequential(\n ConvTranspose2dWNUB(4 * F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up4 = nn.Sequential(\n ConvTranspose2dWNUB(2 * F, F, self.size // 2, self.size // 2, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up5 = nn.Sequential(\n ConvTranspose2dWNUB(F, F, self.size, self.size, 4, 2, 1), nn.LeakyReLU(0.2)\n )\n self.out = Conv2dWNUB(F + in_channels, out_channels, self.size, self.size, kernel_size=1)\n self.apply(lambda x: glorot(x, 0.2))\n glorot(self.out, 1.0)\n\n def forward(self, x):\n x1 = x\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x6 = self.down5(x5)\n # TODO: switch to concat?\n x = self.up1(x6) + x5\n x = self.up2(x) + x4\n x = self.up3(x) + x3\n x = self.up4(x) + x2\n x = self.up5(x)\n x = th.cat([x, x1], dim=1)\n return self.out(x) * self.out_scale" }, { "identifier": "CalV5", "path": "ca_body/nn/color_cal.py", "snippet": "class CalV5(CalBase):\n def __init__(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n cameras,\n # pyre-fixme[2]: Parameter must be annotated.\n identity_camera,\n gs_lrscale: float = 1e0,\n col_lrscale: float = 1e-1,\n ) -> None:\n super(CalBase, self).__init__()\n\n if identity_camera not in cameras:\n identity_camera = cameras[0]\n logger.warning(\n f\"Requested color-calibration identity camera not present, defaulting to {identity_camera}.\"\n )\n\n # pyre-fixme[4]: Attribute must be annotated.\n self.identity_camera = identity_camera\n # pyre-fixme[4]: Attribute must be annotated.\n self.cameras = cameras\n self.gs_lrscale = gs_lrscale\n self.col_lrscale = col_lrscale\n self.holder: ParamHolder = ParamHolder(\n # pyre-fixme[6]: For 1st param expected `Tuple[int]` but got `int`.\n 3 + 3,\n cameras,\n init_value=th.FloatTensor([1, 1, 1, 0, 0, 0]),\n )\n\n # pyre-fixme[4]: Attribute must be annotated.\n self.identity_idx = self.holder.to_idx([identity_camera]).item()\n # pyre-fixme[4]: Attribute must be annotated.\n self.grey_idxs = [self.holder.to_idx([c]).item() for c in cameras if c.startswith(\"41\")]\n\n s = th.FloatTensor([0.37, 0.52, 0.52])\n self.holder.params.data[th.LongTensor(self.grey_idxs), :3] = s\n\n def name_to_idx(self, cam_names: Sequence[str]) -> th.Tensor:\n return self.holder.to_idx(cam_names)\n\n # pyre-fixme[2]: Parameter must be 
annotated.\n def initialize_from_texs(self, ds) -> float:\n tex_mean = ds.tex_mean.permute(1, 2, 0)\n texs = {}\n idx = 0\n while ds[idx] is None:\n idx += 1\n\n for cam in self.cameras:\n samp = ds[idx, cam]\n if samp is None:\n continue\n\n tex = samp[\"tex\"]\n texs[cam] = tex.permute(1, 2, 0)\n\n stats = {}\n for cam in texs.keys():\n t = texs[cam]\n mask = (t > 0).all(dim=2)\n t = t * ds.tex_std + tex_mean\n stats[cam] = (t[mask].mean(dim=0), t[mask].std(dim=0))\n\n normstats = {}\n for cam in texs.keys():\n mean, std = stats[cam]\n imean, istd = stats[self.identity_camera]\n scale = istd / std\n bias = imean - scale * mean\n normstats[cam] = (scale.clamp(max=2), bias)\n\n for cam, nstats in normstats.items():\n cidx = self.name_to_idx([cam])[0]\n if cidx in self.grey_idxs:\n nstats = (nstats[0] / 3, nstats[1] / 3)\n self.holder.params.data[cidx, 0:3] = nstats[0]\n self.holder.params.data[cidx, 3:6] = nstats[1]\n return len(stats.keys()) / len(ds.cameras)\n\n # pyre-fixme[3]: Return type must be annotated.\n # pyre-fixme[2]: Parameter must be annotated.\n # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`\n # inconsistently.\n def load_state_dict(self, state_dict, strict: bool = True):\n state_dict = {k[7:]: v for k, v in state_dict.items() if k.startswith(\"holder.\")}\n return self.holder.load_state_dict(state_dict, strict=strict)\n\n # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.\n # pyre-fixme[3]: Return type must be annotated.\n def state_dict(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n destination=None,\n prefix: str = \"\",\n keep_vars: bool = False,\n saving: bool = False,\n ):\n sd = super(CalBase, self).state_dict(\n destination=destination, prefix=prefix, keep_vars=keep_vars\n )\n if saving:\n sd[prefix + \"holder.key_list\"] = self.holder.key_list\n return sd\n\n def forward(self, image: th.Tensor, cam_idxs: th.Tensor) -> th.Tensor:\n params = self.holder(cam_idxs)\n outs = []\n hook_scales = []\n for i in range(cam_idxs.shape[0]):\n idx = cam_idxs[i]\n img = image[i : i + 1]\n if idx == self.identity_idx:\n outs.append(img)\n hook_scales.append(1)\n continue\n\n w, b = params[i, :3], params[i, 3:]\n if idx in self.grey_idxs:\n b = b.sum()\n out = (img * w[None, :, None, None]).sum(dim=1, keepdim=True).expand(\n -1, 3, -1, -1\n ) + b\n else:\n out = img * w[None, :, None, None] + b[None, :, None, None]\n outs.append(out)\n hook_scales.append(self.gs_lrscale if idx in self.grey_idxs else self.col_lrscale)\n\n hook_scales = th.tensor(hook_scales, device=image.device, dtype=th.float32)\n cal_out = th.cat(outs)\n\n if self.training and params.requires_grad:\n params.register_hook(lambda g, hs=hook_scales: scale_hook(g, hs[:, None]))\n return cal_out" }, { "identifier": "linear2displayBatch", "path": "ca_body/utils/image.py", "snippet": "def linear2displayBatch(\n val: th.Tensor,\n gamma: float = 1.5,\n wbscale: np.ndarray = __DEFAULT_WB_SCALE,\n black: float = 5.0 / 255.0,\n mode: str = \"srgb\",\n) -> th.Tensor:\n scaling: th.Tensor = th.from_numpy(wbscale).to(val.device)\n val = val.float() / 255.0 * scaling[None, :, None, None] - black\n if mode == \"srgb\":\n val = linear2srgb(val, gamma=gamma)\n else:\n val = val ** th.tensor(1.0 / gamma)\n return th.clamp(val, 0, 1) * 255.0" }, { "identifier": "LBSModule", "path": "ca_body/utils/lbs.py", "snippet": "class LBSModule(nn.Module):\n def __init__(\n self, lbs_model_json, lbs_config_dict, lbs_template_verts, lbs_scale, global_scaling\n ):\n 
super().__init__()\n self.lbs_fn = LinearBlendSkinning(lbs_model_json, lbs_config_dict)\n\n self.register_buffer(\"lbs_scale\", th.as_tensor(lbs_scale, dtype=th.float32))\n self.register_buffer(\n \"lbs_template_verts\", th.as_tensor(lbs_template_verts, dtype=th.float32)\n )\n self.register_buffer(\"global_scaling\", th.as_tensor(global_scaling))\n\n def pose(self, verts_unposed, motion, template: Optional[th.Tensor] = None):\n scale = self.lbs_scale.expand(motion.shape[0], -1)\n if template is None:\n template = self.lbs_template_verts\n return self.lbs_fn(motion, scale, verts_unposed + template) * self.global_scaling\n\n def unpose(self, verts, motion):\n B = motion.shape[0]\n scale = self.lbs_scale.expand(B, -1)\n return (\n self.lbs_fn.unpose(motion, scale, verts / self.global_scaling) - self.lbs_template_verts\n )\n\n def template_pose(self, motion):\n B = motion.shape[0]\n scale = self.lbs_scale.expand(B, -1)\n verts = self.lbs_template_verts[np.newaxis].expand(B, -1, -1)\n return self.lbs_fn(motion, scale, verts) * self.global_scaling[np.newaxis]" }, { "identifier": "RenderLayer", "path": "ca_body/utils/render.py", "snippet": "class RenderLayer(nn.Module):\n \n def __init__(self, h, w, vi, vt, vti, flip_uvs=False):\n super().__init__()\n self.register_buffer(\"vi\", vi, persistent=False)\n self.register_buffer(\"vt\", vt, persistent=False)\n self.register_buffer(\"vti\", vti, persistent=False)\n raster_settings = RasterizationSettings(image_size=(h, w))\n self.rasterizer = MeshRasterizer(raster_settings=raster_settings)\n self.flip_uvs = flip_uvs \n image_size = th.as_tensor([h, w], dtype=th.int32)\n self.register_buffer(\"image_size\", image_size)\n \n def forward(self, verts: th.Tensor, tex: th.Tensor, K: th.Tensor, Rt: th.Tensor, background: th.Tensor = None, output_filters: List[str] = None):\n\n assert output_filters is None\n assert background is None\n\n B = verts.shape[0]\n\n image_size = self.image_size[None].repeat(B, 1)\n \n cameras = cameras_from_opencv_projection(Rt[:,:,:3], Rt[:,:3,3], K, image_size)\n\n faces = self.vi[None].repeat(B, 1, 1)\n faces_uvs = self.vti[None].repeat(B, 1, 1)\n verts_uvs = self.vt[None].repeat(B, 1, 1) \n \n # NOTE: this is confusing but correct\n if not self.flip_uvs:\n tex = th.flip(tex.permute(0, 2, 3, 1), (1,))\n\n textures = TexturesUV(\n maps=tex,\n faces_uvs=faces_uvs,\n verts_uvs=verts_uvs,\n ) \n meshes = Meshes(verts, faces, textures=textures)\n \n fragments = self.rasterizer(meshes, cameras=cameras)\n rgb = meshes.sample_textures(fragments)[:,:,:,0] \n rgb[fragments.pix_to_face[...,0] == -1] = 0.0 \n\n return {'render': rgb.permute(0, 3, 1, 2)}" }, { "identifier": "SeamSampler", "path": "ca_body/utils/seams.py", "snippet": "class SeamSampler(nn.Module):\n def __init__(self, seamless_data: Dict[str, Any]) -> None:\n super().__init__()\n\n self.register_buffer(\"dst_ij\", seamless_data[\"dst_ij\"])\n self.register_buffer(\"src_ij\", seamless_data[\"src_ij\"])\n self.register_buffer(\"uvs\", seamless_data[\"uvs\"])\n self.register_buffer(\"weights\", seamless_data[\"weights\"])\n\n def impaint(self, value: th.Tensor) -> th.Tensor:\n return impaint_batch(value, self.dst_ij, self.src_ij)\n\n def resample(self, tex: th.Tensor) -> th.Tensor:\n return resample_tex(tex, self.uvs, self.weights)\n\n def resample_border_only(self, tex: th.Tensor) -> th.Tensor:\n tex = resample_tex(tex, self.uvs, self.weights)\n return tex\n\n def forward(self, tex: th.Tensor) -> th.Tensor:\n x = self.impaint(tex)\n x = self.resample(x)\n return x" }, { 
"identifier": "RenderLayer", "path": "ca_body/utils/render.py", "snippet": "class RenderLayer(nn.Module):\n \n def __init__(self, h, w, vi, vt, vti, flip_uvs=False):\n super().__init__()\n self.register_buffer(\"vi\", vi, persistent=False)\n self.register_buffer(\"vt\", vt, persistent=False)\n self.register_buffer(\"vti\", vti, persistent=False)\n raster_settings = RasterizationSettings(image_size=(h, w))\n self.rasterizer = MeshRasterizer(raster_settings=raster_settings)\n self.flip_uvs = flip_uvs \n image_size = th.as_tensor([h, w], dtype=th.int32)\n self.register_buffer(\"image_size\", image_size)\n \n def forward(self, verts: th.Tensor, tex: th.Tensor, K: th.Tensor, Rt: th.Tensor, background: th.Tensor = None, output_filters: List[str] = None):\n\n assert output_filters is None\n assert background is None\n\n B = verts.shape[0]\n\n image_size = self.image_size[None].repeat(B, 1)\n \n cameras = cameras_from_opencv_projection(Rt[:,:,:3], Rt[:,:3,3], K, image_size)\n\n faces = self.vi[None].repeat(B, 1, 1)\n faces_uvs = self.vti[None].repeat(B, 1, 1)\n verts_uvs = self.vt[None].repeat(B, 1, 1) \n \n # NOTE: this is confusing but correct\n if not self.flip_uvs:\n tex = th.flip(tex.permute(0, 2, 3, 1), (1,))\n\n textures = TexturesUV(\n maps=tex,\n faces_uvs=faces_uvs,\n verts_uvs=verts_uvs,\n ) \n meshes = Meshes(verts, faces, textures=textures)\n \n fragments = self.rasterizer(meshes, cameras=cameras)\n rgb = meshes.sample_textures(fragments)[:,:,:,0] \n rgb[fragments.pix_to_face[...,0] == -1] = 0.0 \n\n return {'render': rgb.permute(0, 3, 1, 2)}" }, { "identifier": "FaceDecoderFrontal", "path": "ca_body/nn/face.py", "snippet": "class FaceDecoderFrontal(nn.Module):\n def __init__(\n self,\n assets: AttrDict,\n n_latent: int = 256,\n n_vert_out: int = 3 * 7306,\n tex_out_shp: Tuple[int, int] = (1024, 1024),\n tex_roi: Tuple[Tuple[int, int], Tuple[int, int]] = ((0, 0), (1024, 1024)),\n ) -> None:\n super().__init__()\n self.n_latent = n_latent\n self.n_vert_out = n_vert_out\n self.tex_roi = tex_roi\n self.tex_roi_shp: Tuple[int, int] = tuple(\n [int(i) for i in np.diff(np.array(tex_roi), axis=0).squeeze()]\n )\n self.tex_out_shp = tex_out_shp\n\n self.encmod = nn.Sequential(la.LinearWN(n_latent, 256), nn.LeakyReLU(0.2, inplace=True))\n self.geommod = nn.Sequential(la.LinearWN(256, n_vert_out))\n\n self.viewmod = nn.Sequential(la.LinearWN(3, 8), nn.LeakyReLU(0.2, inplace=True))\n self.texmod2 = nn.Sequential(\n la.LinearWN(256 + 8, 256 * 4 * 4), nn.LeakyReLU(0.2, inplace=True)\n )\n self.texmod = nn.Sequential(\n la.ConvTranspose2dWNUB(256, 256, 8, 8, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(256, 128, 16, 16, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(128, 128, 32, 32, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(128, 64, 64, 64, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(64, 64, 128, 128, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(64, 32, 256, 256, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(32, 8, 512, 512, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(8, 3, 1024, 1024, 4, 2, 1),\n )\n\n self.bias = nn.Parameter(th.zeros(3, self.tex_roi_shp[0], self.tex_roi_shp[1]))\n self.bias.data.zero_()\n\n self.register_buffer(\n \"frontal_view\", th.as_tensor(assets.face_frontal_view, dtype=th.float32)\n )\n\n self.apply(lambda x: la.glorot(x, 0.2))\n la.glorot(self.texmod[-1], 1.0)\n\n def forward(self, face_embs: 
th.Tensor) -> Dict[str, th.Tensor]:\n B = face_embs.shape[0]\n view = self.frontal_view[np.newaxis].expand(B, -1)\n encout = self.encmod(face_embs)\n geomout = self.geommod(encout)\n viewout = self.viewmod(view)\n encview = th.cat([encout, viewout], dim=1)\n texout = self.texmod(self.texmod2(encview).view(-1, 256, 4, 4))\n out = {\"face_geom\": geomout.view(geomout.shape[0], -1, 3)}\n out[\"face_tex_raw\"] = texout\n texout = texout + self.bias[None]\n out[\"face_tex\"] = 255 * (texout + 0.5)\n return out" } ]
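The context entries above are verbatim reference snippets from ca_body; the two smallest helpers, tile2d and compute_view_cos, are easiest to read with concrete shapes. The sketch below is not part of the record: it re-implements tile2d as documented, and it feeds precomputed normals into a simplified view-cosine helper instead of deriving them from faces the way the real compute_view_cos does. All shapes and values are illustrative assumptions.

import torch as th
import torch.nn.functional as F

def tile2d(x: th.Tensor, size: int) -> th.Tensor:
    # [N, F] -> [N, F, size, size]; the snippet above expects an int size only
    return x[:, :, None, None].expand(-1, -1, size, size)

def view_cos_sketch(verts: th.Tensor, normals: th.Tensor, camera_pos: th.Tensor) -> th.Tensor:
    # cosine between the (assumed precomputed) vertex normal and the view direction
    vn = F.normalize(normals, dim=-1)
    v2c = F.normalize(verts - camera_pos[:, None], dim=-1)
    return th.einsum("bnd,bnd->bn", vn, v2c)

pose = th.randn(2, 6)                        # per-sample pose features
pose_map = tile2d(pose, 64)                  # [2, 6, 64, 64] spatial conditioning map
verts, normals = th.randn(2, 100, 3), th.randn(2, 100, 3)
view_cos = view_cos_sketch(verts, normals, th.zeros(2, 3))   # [2, 100], values in [-1, 1]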
import logging import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F import ca_body.nn.layers as la from typing import Dict, Optional, Tuple from torchvision.utils import make_grid from torchvision.transforms.functional import gaussian_blur from ca_body.nn.blocks import ( ConvBlock, ConvDownBlock, UpConvBlockDeep, tile2d, weights_initializer, ) from ca_body.nn.dof_cal import LearnableBlur from ca_body.utils.geom import ( GeometryModule, compute_view_cos, depth_discontuity_mask, depth2normals, ) from ca_body.nn.shadow import ShadowUNet, PoseToShadow from ca_body.nn.unet import UNetWB from ca_body.nn.color_cal import CalV5 from ca_body.utils.image import linear2displayBatch from ca_body.utils.lbs import LBSModule from ca_body.utils.render import RenderLayer from ca_body.utils.seams import SeamSampler from ca_body.utils.render import RenderLayer from ca_body.nn.face import FaceDecoderFrontal
13,133
x = self.conv_blocks[b](x) # NOTE: here we do resampling at feature level x = self.seam_sampler.impaint(x) x = self.seam_sampler.resample(x) x = self.seam_sampler.resample(x) verts_features, tex_features = th.split(x, self.n_channels[-1], 1) verts_uv_delta_rec = self.verts_conv(verts_features) # TODO: need to get values verts_delta_rec = self.geo_fn.from_uv(verts_uv_delta_rec) tex_mean_rec = self.tex_conv(tex_features) preds = { 'geom_delta_rec': verts_delta_rec, 'geom_uv_delta_rec': verts_uv_delta_rec, 'tex_mean_rec': tex_mean_rec, 'embs_conv': embs_conv, 'pose_conv': pose_conv, } return preds class FaceEncoder(nn.Module): """A joint encoder for tex and geometry.""" def __init__( self, noise_std, assets, n_embs=256, uv_size=512, logvar_scale=0.1, n_vert_in=7306 * 3, prefix="face_", ): """Fixed-width conv encoder.""" super().__init__() # TODO: self.noise_std = noise_std self.n_embs = n_embs self.logvar_scale = logvar_scale self.prefix = prefix self.uv_size = uv_size assert self.uv_size == 512 tex_cond_mask = assets.mugsy_face_mask[..., 0] tex_cond_mask = th.as_tensor(tex_cond_mask, dtype=th.float32)[np.newaxis, np.newaxis] tex_cond_mask = F.interpolate( tex_cond_mask, (self.uv_size, self.uv_size), mode="bilinear", align_corners=True ) self.register_buffer("tex_cond_mask", tex_cond_mask) self.conv_blocks = nn.Sequential( ConvDownBlock(3, 4, 512), ConvDownBlock(4, 8, 256), ConvDownBlock(8, 16, 128), ConvDownBlock(16, 32, 64), ConvDownBlock(32, 64, 32), ConvDownBlock(64, 128, 16), ConvDownBlock(128, 128, 8), ) self.geommod = nn.Sequential(la.LinearWN(n_vert_in, 256), nn.LeakyReLU(0.2, inplace=True)) self.jointmod = nn.Sequential( la.LinearWN(256 + 128 * 4 * 4, 512), nn.LeakyReLU(0.2, inplace=True) ) # TODO: should we put initializer self.mu = la.LinearWN(512, self.n_embs) self.logvar = la.LinearWN(512, self.n_embs) self.apply(weights_initializer(0.2)) self.mu.apply(weights_initializer(1.0)) self.logvar.apply(weights_initializer(1.0)) # TODO: compute_losses()? def forward(self, face_geom: th.Tensor, face_tex: th.Tensor, **kwargs): B = face_geom.shape[0] tex_cond = F.interpolate( face_tex, (self.uv_size, self.uv_size), mode="bilinear", align_corners=False ) tex_cond = (tex_cond / 255.0 - 0.5) * self.tex_cond_mask x = self.conv_blocks(tex_cond) tex_enc = x.reshape(B, 4 * 4 * 128) geom_enc = self.geommod(face_geom.reshape(B, -1)) x = self.jointmod(th.cat([tex_enc, geom_enc], dim=1)) embs_mu = self.mu(x) embs_logvar = self.logvar_scale * self.logvar(x) # NOTE: the noise is only applied to the input-conditioned values if self.training: noise = th.randn_like(embs_mu) embs = embs_mu + th.exp(embs_logvar) * noise * self.noise_std else: embs = embs_mu.clone() preds = {"embs": embs, "embs_mu": embs_mu, "embs_logvar": embs_logvar, "tex_cond": tex_cond} preds = {f"{self.prefix}{k}": v for k, v in preds.items()} return preds class UNetViewDecoder(nn.Module): def __init__(self, geo_fn, net_uv_size, seam_sampler, n_init_ftrs=8): super().__init__() self.geo_fn = geo_fn self.net_uv_size = net_uv_size self.unet = UNetWB(4, 3, n_init_ftrs=n_init_ftrs, size=net_uv_size) self.register_buffer("faces", self.geo_fn.vi.to(th.int64), persistent=False) def forward(self, geom_rec, tex_mean_rec, camera_pos): with th.no_grad():
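The decoder tail above (repeated in the full file below) keeps geometry and texture in two separate streams: the joint features are duplicated, pushed through grouped up-convolutions with groups=2, and then split back into the inputs of verts_conv and tex_conv. A toy version of that grouped-stream pattern, with made-up channel counts and spatial size:

import torch as th
import torch.nn as nn

n_ch = 16                                    # illustrative; stands in for n_channels[-1]
joint = th.randn(2, n_ch, 8, 8)
x = th.cat([joint, joint], dim=1)            # two copies -> [2, 2*n_ch, 8, 8]
grouped = nn.Conv2d(2 * n_ch, 2 * n_ch, kernel_size=3, padding=1, groups=2)
x = grouped(x)                               # groups=2 keeps the two streams independent
verts_features, tex_features = th.split(x, n_ch, dim=1)
print(verts_features.shape, tex_features.shape)   # each torch.Size([2, 16, 8, 8])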
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) class CameraPixelBias(nn.Module): def __init__(self, image_height, image_width, cameras, ds_rate) -> None: super().__init__() self.image_height = image_height self.image_width = image_width self.cameras = cameras self.n_cameras = len(cameras) bias = th.zeros( (self.n_cameras, 1, image_width // ds_rate, image_height // ds_rate), dtype=th.float32 ) self.register_parameter("bias", nn.Parameter(bias)) def forward(self, idxs: th.Tensor): bias_up = F.interpolate( self.bias[idxs], size=(self.image_height, self.image_width), mode='bilinear' ) return bias_up class AutoEncoder(nn.Module): def __init__( self, encoder, decoder, decoder_view, encoder_face, # hqlp decoder to get the codes decoder_face, shadow_net, upscale_net, assets, pose_to_shadow=None, renderer=None, cal=None, pixel_cal=None, learn_blur: bool = True, ): super().__init__() # TODO: should we have a shared LBS here? self.geo_fn = GeometryModule( assets.topology.vi, assets.topology.vt, assets.topology.vti, assets.topology.v2uv, uv_size=1024, impaint=True, ) self.lbs_fn = LBSModule( assets.lbs_model_json, assets.lbs_config_dict, assets.lbs_template_verts, assets.lbs_scale, assets.global_scaling, ) self.seam_sampler = SeamSampler(assets.seam_data_1024) self.seam_sampler_2k = SeamSampler(assets.seam_data_2048) # joint tex -> body and clothes # TODO: why do we have a joint one in the first place? tex_mean = gaussian_blur(th.as_tensor(assets.tex_mean)[np.newaxis], kernel_size=11) self.register_buffer("tex_mean", F.interpolate(tex_mean, (2048, 2048), mode='bilinear')) # this is shared self.tex_std = assets.tex_var if 'tex_var' in assets else 64.0 face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) meye_mask = self.geo_fn.to_uv( th.as_tensor(assets.mouth_eyes_mask_geom[np.newaxis, :, np.newaxis]) ) meye_mask = F.interpolate(meye_mask, (2048, 2048), mode='bilinear') self.register_buffer("meye_mask", meye_mask) self.decoder = ConvDecoder( geo_fn=self.geo_fn, seam_sampler=self.seam_sampler, **decoder, assets=assets, ) # embs for everything but face non_head_mask = 1.0 - assets.face_mask self.encoder = Encoder( geo_fn=self.geo_fn, mask=non_head_mask, **encoder, ) self.encoder_face = FaceEncoder( assets=assets, **encoder_face, ) # using face decoder to generate better conditioning decoder_face_ckpt_path = None if 'ckpt' in decoder_face: decoder_face_ckpt_path = decoder_face.pop('ckpt') self.decoder_face = FaceDecoderFrontal(assets=assets, **decoder_face) if decoder_face_ckpt_path is not None: self.decoder_face.load_state_dict(th.load(decoder_face_ckpt_path), strict=False) self.decoder_view = UNetViewDecoder( self.geo_fn, seam_sampler=self.seam_sampler, **decoder_view, ) self.shadow_net = ShadowUNet( ao_mean=assets.ao_mean, interp_mode="bilinear", biases=False, **shadow_net, ) self.pose_to_shadow_enabled = False if pose_to_shadow is not None: self.pose_to_shadow_enabled = True self.pose_to_shadow = PoseToShadow(**pose_to_shadow) self.upscale_net = UpscaleNet( in_channels=6, size=1024, upscale_factor=2, out_channels=3, **upscale_net ) self.pixel_cal_enabled = False if pixel_cal is not None: self.pixel_cal_enabled = True self.pixel_cal = CameraPixelBias(**pixel_cal, cameras=assets.camera_ids) self.learn_blur_enabled = False 
if learn_blur: self.learn_blur_enabled = True self.learn_blur = LearnableBlur(assets.camera_ids) # training-only stuff self.cal_enabled = False if cal is not None: self.cal_enabled = True self.cal = CalV5(**cal, cameras=assets.camera_ids) self.rendering_enabled = False if renderer is not None: self.rendering_enabled = True self.renderer = RenderLayer( h=renderer.image_height, w=renderer.image_width, vt=self.geo_fn.vt, vi=self.geo_fn.vi, vti=self.geo_fn.vti, flip_uvs=False, ) @th.jit.unused def compute_summaries(self, preds, batch): # TODO: switch to common summaries? # return compute_summaries_mesh(preds, batch) rgb = linear2displayBatch(preds['rgb'][:, :3]) rgb_gt = linear2displayBatch(batch['image']) depth = preds['depth'][:, np.newaxis] mask = depth > 0.0 normals = ( 255 * (1.0 - depth2normals(depth, batch['focal'], batch['princpt'])) / 2.0 ) * mask grid_rgb = make_grid(rgb, nrow=16).permute(1, 2, 0).clip(0, 255).to(th.uint8) grid_rgb_gt = make_grid(rgb_gt, nrow=16).permute(1, 2, 0).clip(0, 255).to(th.uint8) grid_normals = make_grid(normals, nrow=16).permute(1, 2, 0).clip(0, 255).to(th.uint8) progress_image = th.cat([grid_rgb, grid_rgb_gt, grid_normals], dim=0) return { 'progress_image': (progress_image, 'png'), } def forward_tex(self, tex_mean_rec, tex_view_rec, shadow_map): x = th.cat([tex_mean_rec, tex_view_rec], dim=1) tex_rec = tex_mean_rec + tex_view_rec tex_rec = self.seam_sampler.impaint(tex_rec) tex_rec = self.seam_sampler.resample(tex_rec) tex_rec = F.interpolate(tex_rec, size=(2048, 2048), mode="bilinear", align_corners=False) tex_rec = tex_rec + self.upscale_net(x) tex_rec = tex_rec * self.tex_std + self.tex_mean shadow_map = self.seam_sampler_2k.impaint(shadow_map) shadow_map = self.seam_sampler_2k.resample(shadow_map) shadow_map = self.seam_sampler_2k.resample(shadow_map) tex_rec = tex_rec * shadow_map tex_rec = self.seam_sampler_2k.impaint(tex_rec) tex_rec = self.seam_sampler_2k.resample(tex_rec) tex_rec = self.seam_sampler_2k.resample(tex_rec) return tex_rec def encode(self, geom: th.Tensor, lbs_motion: th.Tensor, face_embs_hqlp: th.Tensor): with th.no_grad(): verts_unposed = self.lbs_fn.unpose(geom, lbs_motion) verts_unposed_uv = self.geo_fn.to_uv(verts_unposed) # extract face region for geom + tex enc_preds = self.encoder(motion=lbs_motion, verts_unposed=verts_unposed) # TODO: probably need to rename these to `face_embs_mugsy` or smth # TODO: we need the same thing for face? # enc_face_preds = self.encoder_face(verts_unposed_uv) with th.no_grad(): face_dec_preds = self.decoder_face(face_embs_hqlp) enc_face_preds = self.encoder_face(**face_dec_preds) preds = { **enc_preds, **enc_face_preds, 'face_dec_preds': face_dec_preds, } return preds def forward( self, # TODO: should we try using this as well for cond? 
lbs_motion: th.Tensor, campos: th.Tensor, geom: Optional[th.Tensor] = None, ao: Optional[th.Tensor] = None, K: Optional[th.Tensor] = None, Rt: Optional[th.Tensor] = None, image_bg: Optional[th.Tensor] = None, image: Optional[th.Tensor] = None, image_mask: Optional[th.Tensor] = None, embs: Optional[th.Tensor] = None, _index: Optional[Dict[str, th.Tensor]] = None, face_embs: Optional[th.Tensor] = None, embs_conv: Optional[th.Tensor] = None, tex_seg: Optional[th.Tensor] = None, encode=True, iteration: Optional[int] = None, **kwargs, ): B = lbs_motion.shape[0] if not th.jit.is_scripting() and encode: # NOTE: these are `face_embs_hqlp` enc_preds = self.encode(geom, lbs_motion, face_embs) embs = enc_preds['embs'] # NOTE: these are `face_embs` in body space face_embs_body = enc_preds['face_embs'] dec_preds = self.decoder( motion=lbs_motion, embs=embs, face_embs=face_embs_body, embs_conv=embs_conv, ) geom_rec = self.lbs_fn.pose(dec_preds['geom_delta_rec'], lbs_motion) dec_view_preds = self.decoder_view( geom_rec=geom_rec, tex_mean_rec=dec_preds["tex_mean_rec"], camera_pos=campos, ) # TODO: should we train an AO model? if self.training and self.pose_to_shadow_enabled: shadow_preds = self.shadow_net(ao_map=ao) pose_shadow_preds = self.pose_to_shadow(lbs_motion) shadow_preds['pose_shadow_map'] = pose_shadow_preds['shadow_map'] elif self.pose_to_shadow_enabled: shadow_preds = self.pose_to_shadow(lbs_motion) else: shadow_preds = self.shadow_net(ao_map=ao) tex_rec = self.forward_tex( dec_preds["tex_mean_rec"], dec_view_preds["tex_view_rec"], shadow_preds["shadow_map"], ) if not th.jit.is_scripting() and self.cal_enabled: tex_rec = self.cal(tex_rec, self.cal.name_to_idx(_index['camera'])) preds = { 'geom': geom_rec, 'tex_rec': tex_rec, **dec_preds, **shadow_preds, **dec_view_preds, } if not th.jit.is_scripting() and encode: preds.update(**enc_preds) if not th.jit.is_scripting() and self.rendering_enabled: # NOTE: this is a reduced version tested for forward only renders = self.renderer( preds['geom'], tex_rec, K=K, Rt=Rt, ) preds.update(rgb=renders['render']) if not th.jit.is_scripting() and self.learn_blur_enabled: preds['rgb'] = self.learn_blur(preds['rgb'], _index['camera']) preds['learn_blur_weights'] = self.learn_blur.reg(_index['camera']) if not th.jit.is_scripting() and self.pixel_cal_enabled: assert self.cal_enabled cam_idxs = self.cal.name_to_idx(_index['camera']) pixel_bias = self.pixel_cal(cam_idxs) preds['rgb'] = preds['rgb'] + pixel_bias return preds class Encoder(nn.Module): """A joint encoder for tex and geometry.""" def __init__( self, geo_fn, n_embs, noise_std, mask, logvar_scale=0.1, ): """Fixed-width conv encoder.""" super().__init__() self.noise_std = noise_std self.n_embs = n_embs self.geo_fn = geo_fn self.logvar_scale = logvar_scale self.verts_conv = ConvDownBlock(3, 8, 512) mask = th.as_tensor(mask[np.newaxis, np.newaxis], dtype=th.float32) mask = F.interpolate(mask, size=(512, 512), mode='bilinear').to(th.bool) self.register_buffer("mask", mask) self.joint_conv_blocks = nn.Sequential( ConvDownBlock(8, 16, 256), ConvDownBlock(16, 32, 128), ConvDownBlock(32, 32, 64), ConvDownBlock(32, 64, 32), ConvDownBlock(64, 128, 16), ConvDownBlock(128, 128, 8), # ConvDownBlock(128, 128, 4), ) # TODO: should we put initializer self.mu = la.LinearWN(4 * 4 * 128, self.n_embs) self.logvar = la.LinearWN(4 * 4 * 128, self.n_embs) self.apply(weights_initializer(0.2)) self.mu.apply(weights_initializer(1.0)) self.logvar.apply(weights_initializer(1.0)) def forward(self, motion, verts_unposed): preds = {} 
B = motion.shape[0] # converting motion to the unposed verts_cond = ( F.interpolate(self.geo_fn.to_uv(verts_unposed), size=(512, 512), mode='bilinear') * self.mask ) verts_cond = self.verts_conv(verts_cond) # tex_cond = F.interpolate(tex_avg, size=(512, 512), mode='bilinear') * self.mask # tex_cond = self.tex_conv(tex_cond) # joint_cond = th.cat([verts_cond, tex_cond], dim=1) joint_cond = verts_cond x = self.joint_conv_blocks(joint_cond) x = x.reshape(B, -1) embs_mu = self.mu(x) embs_logvar = self.logvar_scale * self.logvar(x) # NOTE: the noise is only applied to the input-conditioned values if self.training: noise = th.randn_like(embs_mu) embs = embs_mu + th.exp(embs_logvar) * noise * self.noise_std else: embs = embs_mu.clone() preds.update( embs=embs, embs_mu=embs_mu, embs_logvar=embs_logvar, ) return preds class ConvDecoder(nn.Module): """Multi-region view-independent decoder.""" def __init__( self, geo_fn, uv_size, seam_sampler, init_uv_size, n_pose_dims, n_pose_enc_channels, n_embs, n_embs_enc_channels, n_face_embs, n_init_channels, n_min_channels, assets, ): super().__init__() self.geo_fn = geo_fn self.uv_size = uv_size self.init_uv_size = init_uv_size self.n_pose_dims = n_pose_dims self.n_pose_enc_channels = n_pose_enc_channels self.n_embs = n_embs self.n_embs_enc_channels = n_embs_enc_channels self.n_face_embs = n_face_embs self.n_blocks = int(np.log2(self.uv_size // init_uv_size)) self.sizes = [init_uv_size * 2**s for s in range(self.n_blocks + 1)] # TODO: just specify a sequence? self.n_channels = [ max(n_init_channels // 2**b, n_min_channels) for b in range(self.n_blocks + 1) ] logger.info(f"ConvDecoder: n_channels = {self.n_channels}") self.local_pose_conv_block = ConvBlock( n_pose_dims, n_pose_enc_channels, init_uv_size, kernel_size=1, padding=0, ) self.embs_fc = nn.Sequential( la.LinearWN(n_embs, 4 * 4 * 128), nn.LeakyReLU(0.2, inplace=True), ) # TODO: should we switch to the basic version? 
self.embs_conv_block = nn.Sequential( UpConvBlockDeep(128, 128, 8), UpConvBlockDeep(128, 128, 16), UpConvBlockDeep(128, 64, 32), UpConvBlockDeep(64, n_embs_enc_channels, 64), ) self.face_embs_fc = nn.Sequential( la.LinearWN(n_face_embs, 4 * 4 * 32), nn.LeakyReLU(0.2, inplace=True), ) self.face_embs_conv_block = nn.Sequential( UpConvBlockDeep(32, 64, 8), UpConvBlockDeep(64, 64, 16), UpConvBlockDeep(64, n_embs_enc_channels, 32), ) n_groups = 2 self.joint_conv_block = ConvBlock( n_pose_enc_channels + n_embs_enc_channels, n_init_channels, self.init_uv_size, ) self.conv_blocks = nn.ModuleList([]) for b in range(self.n_blocks): self.conv_blocks.append( UpConvBlockDeep( self.n_channels[b] * n_groups, self.n_channels[b + 1] * n_groups, self.sizes[b + 1], groups=n_groups, ), ) self.verts_conv = la.Conv2dWNUB( in_channels=self.n_channels[-1], out_channels=3, kernel_size=3, height=self.uv_size, width=self.uv_size, padding=1, ) self.tex_conv = la.Conv2dWNUB( in_channels=self.n_channels[-1], out_channels=3, kernel_size=3, height=self.uv_size, width=self.uv_size, padding=1, ) self.apply(weights_initializer(0.2)) self.verts_conv.apply(weights_initializer(1.0)) self.tex_conv.apply(weights_initializer(1.0)) self.seam_sampler = seam_sampler # NOTE: removing head region from pose completely pose_cond_mask = th.as_tensor( assets.pose_cond_mask[np.newaxis] * (1 - assets.head_cond_mask[np.newaxis, np.newaxis]), dtype=th.int32, ) self.register_buffer("pose_cond_mask", pose_cond_mask) face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) body_cond_mask = th.as_tensor(assets.body_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("body_cond_mask", body_cond_mask) def forward(self, motion, embs, face_embs, embs_conv: Optional[th.Tensor] = None): # processing pose pose = motion[:, 6:] B = pose.shape[0] non_head_mask = (self.body_cond_mask * (1.0 - self.face_cond_mask)).clip(0.0, 1.0) pose_masked = tile2d(pose, self.init_uv_size) * self.pose_cond_mask pose_conv = self.local_pose_conv_block(pose_masked) * non_head_mask # TODO: decoding properly? 
if embs_conv is None: embs_conv = self.embs_conv_block(self.embs_fc(embs).reshape(B, 128, 4, 4)) face_conv = self.face_embs_conv_block(self.face_embs_fc(face_embs).reshape(B, 32, 4, 4)) # merging embeddings with spatial masks embs_conv[:, :, 32:, :32] = ( face_conv * self.face_cond_mask[:, :, 32:, :32] + embs_conv[:, :, 32:, :32] * non_head_mask[:, :, 32:, :32] ) joint = th.cat([pose_conv, embs_conv], axis=1) joint = self.joint_conv_block(joint) x = th.cat([joint, joint], axis=1) for b in range(self.n_blocks): x = self.conv_blocks[b](x) # NOTE: here we do resampling at feature level x = self.seam_sampler.impaint(x) x = self.seam_sampler.resample(x) x = self.seam_sampler.resample(x) verts_features, tex_features = th.split(x, self.n_channels[-1], 1) verts_uv_delta_rec = self.verts_conv(verts_features) # TODO: need to get values verts_delta_rec = self.geo_fn.from_uv(verts_uv_delta_rec) tex_mean_rec = self.tex_conv(tex_features) preds = { 'geom_delta_rec': verts_delta_rec, 'geom_uv_delta_rec': verts_uv_delta_rec, 'tex_mean_rec': tex_mean_rec, 'embs_conv': embs_conv, 'pose_conv': pose_conv, } return preds class FaceEncoder(nn.Module): """A joint encoder for tex and geometry.""" def __init__( self, noise_std, assets, n_embs=256, uv_size=512, logvar_scale=0.1, n_vert_in=7306 * 3, prefix="face_", ): """Fixed-width conv encoder.""" super().__init__() # TODO: self.noise_std = noise_std self.n_embs = n_embs self.logvar_scale = logvar_scale self.prefix = prefix self.uv_size = uv_size assert self.uv_size == 512 tex_cond_mask = assets.mugsy_face_mask[..., 0] tex_cond_mask = th.as_tensor(tex_cond_mask, dtype=th.float32)[np.newaxis, np.newaxis] tex_cond_mask = F.interpolate( tex_cond_mask, (self.uv_size, self.uv_size), mode="bilinear", align_corners=True ) self.register_buffer("tex_cond_mask", tex_cond_mask) self.conv_blocks = nn.Sequential( ConvDownBlock(3, 4, 512), ConvDownBlock(4, 8, 256), ConvDownBlock(8, 16, 128), ConvDownBlock(16, 32, 64), ConvDownBlock(32, 64, 32), ConvDownBlock(64, 128, 16), ConvDownBlock(128, 128, 8), ) self.geommod = nn.Sequential(la.LinearWN(n_vert_in, 256), nn.LeakyReLU(0.2, inplace=True)) self.jointmod = nn.Sequential( la.LinearWN(256 + 128 * 4 * 4, 512), nn.LeakyReLU(0.2, inplace=True) ) # TODO: should we put initializer self.mu = la.LinearWN(512, self.n_embs) self.logvar = la.LinearWN(512, self.n_embs) self.apply(weights_initializer(0.2)) self.mu.apply(weights_initializer(1.0)) self.logvar.apply(weights_initializer(1.0)) # TODO: compute_losses()? 
def forward(self, face_geom: th.Tensor, face_tex: th.Tensor, **kwargs): B = face_geom.shape[0] tex_cond = F.interpolate( face_tex, (self.uv_size, self.uv_size), mode="bilinear", align_corners=False ) tex_cond = (tex_cond / 255.0 - 0.5) * self.tex_cond_mask x = self.conv_blocks(tex_cond) tex_enc = x.reshape(B, 4 * 4 * 128) geom_enc = self.geommod(face_geom.reshape(B, -1)) x = self.jointmod(th.cat([tex_enc, geom_enc], dim=1)) embs_mu = self.mu(x) embs_logvar = self.logvar_scale * self.logvar(x) # NOTE: the noise is only applied to the input-conditioned values if self.training: noise = th.randn_like(embs_mu) embs = embs_mu + th.exp(embs_logvar) * noise * self.noise_std else: embs = embs_mu.clone() preds = {"embs": embs, "embs_mu": embs_mu, "embs_logvar": embs_logvar, "tex_cond": tex_cond} preds = {f"{self.prefix}{k}": v for k, v in preds.items()} return preds class UNetViewDecoder(nn.Module): def __init__(self, geo_fn, net_uv_size, seam_sampler, n_init_ftrs=8): super().__init__() self.geo_fn = geo_fn self.net_uv_size = net_uv_size self.unet = UNetWB(4, 3, n_init_ftrs=n_init_ftrs, size=net_uv_size) self.register_buffer("faces", self.geo_fn.vi.to(th.int64), persistent=False) def forward(self, geom_rec, tex_mean_rec, camera_pos): with th.no_grad():
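Encoder and FaceEncoder above share the same sampling step for the embeddings: during training, Gaussian noise scaled by exp(logvar) and a fixed noise_std perturbs the mean, while at evaluation time the mean is returned directly. A stripped-down sketch of just that step; tensor sizes and the noise_std value are illustrative:

import torch as th

def sample_embs(embs_mu: th.Tensor, embs_logvar: th.Tensor, noise_std: float, training: bool) -> th.Tensor:
    # noise is only applied to the input-conditioned embeddings during training
    if training:
        noise = th.randn_like(embs_mu)
        return embs_mu + th.exp(embs_logvar) * noise * noise_std
    return embs_mu.clone()

mu = th.zeros(4, 256)                        # n_embs = 256 in both encoders
logvar = 0.1 * th.randn(4, 256)              # logvar_scale = 0.1 scales the raw logvar head
embs = sample_embs(mu, logvar, noise_std=1.0, training=True)   # torch.Size([4, 256])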
view_cos = compute_view_cos(geom_rec, self.faces, camera_pos)
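This gold line completes the UNetViewDecoder.forward body, which the cropped code above leaves hanging at `with th.no_grad():`. A hedged reconstruction of just that method, stopping where the record stops; building tex_view_rec from the view cosines and tex_mean_rec lies outside the cropped record and is deliberately omitted:

import torch as th
import torch.nn as nn
from ca_body.utils.geom import compute_view_cos   # same import as in the record

class UNetViewDecoderSketch(nn.Module):
    # minimal stand-in: only the pieces referenced by the gold line are kept
    def __init__(self, faces: th.Tensor):
        super().__init__()
        self.register_buffer("faces", faces.to(th.int64), persistent=False)

    def forward(self, geom_rec, tex_mean_rec, camera_pos):
        with th.no_grad():
            view_cos = compute_view_cos(geom_rec, self.faces, camera_pos)   # gold next line
        # remainder of the real method is not shown in this record
        return view_cos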
7
2023-12-27 15:31:35+00:00
16k
open-mmlab/Amphion
modules/wenet_extractor/squeezeformer/encoder.py
[ { "identifier": "DepthwiseConv2dSubsampling4", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class DepthwiseConv2dSubsampling4(BaseSubsampling):\n \"\"\"Depthwise Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n pos_enc_class (nn.Module): position encoding class.\n dw_stride (int): Whether do depthwise convolution.\n input_size (int): filter bank dimension.\n\n \"\"\"\n\n def __init__(\n self,\n idim: int,\n odim: int,\n pos_enc_class: torch.nn.Module,\n dw_stride: bool = False,\n input_size: int = 80,\n input_dropout_rate: float = 0.1,\n init_weights: bool = True,\n ):\n super(DepthwiseConv2dSubsampling4, self).__init__()\n self.idim = idim\n self.odim = odim\n self.pw_conv = nn.Conv2d(\n in_channels=idim, out_channels=odim, kernel_size=3, stride=2\n )\n self.act1 = nn.ReLU()\n self.dw_conv = nn.Conv2d(\n in_channels=odim,\n out_channels=odim,\n kernel_size=3,\n stride=2,\n groups=odim if dw_stride else 1,\n )\n self.act2 = nn.ReLU()\n self.pos_enc = pos_enc_class\n self.input_proj = nn.Sequential(\n nn.Linear(odim * (((input_size - 1) // 2 - 1) // 2), odim),\n nn.Dropout(p=input_dropout_rate),\n )\n if init_weights:\n linear_max = (odim * input_size / 4) ** -0.5\n torch.nn.init.uniform_(\n self.input_proj.state_dict()[\"0.weight\"], -linear_max, linear_max\n )\n torch.nn.init.uniform_(\n self.input_proj.state_dict()[\"0.bias\"], -linear_max, linear_max\n )\n self.subsampling_rate = 4\n # 6 = (3 - 1) * 1 + (3 - 1) * 2\n self.right_context = 6\n\n def forward(\n self, x: torch.Tensor, x_mask: torch.Tensor, offset: int = 0\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n x = x.unsqueeze(1) # (b, c=1, t, f)\n x = self.pw_conv(x)\n x = self.act1(x)\n x = self.dw_conv(x)\n x = self.act2(x)\n b, c, t, f = x.size()\n x = x.permute(0, 2, 1, 3)\n x = x.contiguous().view(b, t, c * f)\n x, pos_emb = self.pos_enc(x, offset)\n x = self.input_proj(x)\n return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2]" }, { "identifier": "TimeReductionLayer1D", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class TimeReductionLayer1D(nn.Module):\n \"\"\"\n Modified NeMo,\n Squeezeformer Time Reduction procedure.\n Downsamples the audio by `stride` in the time dimension.\n Args:\n channel (int): input dimension of\n MultiheadAttentionMechanism and PositionwiseFeedForward\n out_dim (int): Output dimension of the module.\n kernel_size (int): Conv kernel size for\n depthwise convolution in convolution module\n stride (int): Downsampling factor in time dimension.\n \"\"\"\n\n def __init__(\n self, channel: int, out_dim: int, kernel_size: int = 5, stride: int = 2\n ):\n super(TimeReductionLayer1D, self).__init__()\n\n self.channel = channel\n self.out_dim = out_dim\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = max(0, self.kernel_size - self.stride)\n\n self.dw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=channel,\n kernel_size=kernel_size,\n stride=stride,\n padding=self.padding,\n groups=channel,\n )\n\n self.pw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=out_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n groups=1,\n )\n\n self.init_weights()\n\n def init_weights(self):\n dw_max = self.kernel_size**-0.5\n pw_max = self.channel**-0.5\n torch.nn.init.uniform_(self.dw_conv.weight, -dw_max, dw_max)\n torch.nn.init.uniform_(self.dw_conv.bias, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pw_conv.weight, -pw_max, pw_max)\n 
torch.nn.init.uniform_(self.pw_conv.bias, -pw_max, pw_max)\n\n def forward(\n self,\n xs,\n xs_lens: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ):\n xs = xs.transpose(1, 2) # [B, C, T]\n xs = xs.masked_fill(mask_pad.eq(0), 0.0)\n\n xs = self.dw_conv(xs)\n xs = self.pw_conv(xs)\n\n xs = xs.transpose(1, 2) # [B, T, C]\n\n B, T, D = xs.size()\n mask = mask[:, :: self.stride, :: self.stride]\n mask_pad = mask_pad[:, :, :: self.stride]\n L = mask_pad.size(-1)\n # For JIT exporting, we remove F.pad operator.\n if L - T < 0:\n xs = xs[:, : L - T, :].contiguous()\n else:\n dummy_pad = torch.zeros(B, L - T, D, device=xs.device)\n xs = torch.cat([xs, dummy_pad], dim=1)\n\n xs_lens = torch.div(xs_lens + 1, 2, rounding_mode=\"trunc\")\n return xs, xs_lens, mask, mask_pad" }, { "identifier": "TimeReductionLayer2D", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class TimeReductionLayer2D(nn.Module):\n def __init__(self, kernel_size: int = 5, stride: int = 2, encoder_dim: int = 256):\n super(TimeReductionLayer2D, self).__init__()\n self.encoder_dim = encoder_dim\n self.kernel_size = kernel_size\n self.dw_conv = Conv2dValid(\n in_channels=encoder_dim,\n out_channels=encoder_dim,\n kernel_size=(kernel_size, 1),\n stride=stride,\n valid_trigy=True,\n )\n self.pw_conv = Conv2dValid(\n in_channels=encoder_dim,\n out_channels=encoder_dim,\n kernel_size=1,\n stride=1,\n valid_trigx=False,\n valid_trigy=False,\n )\n\n self.kernel_size = kernel_size\n self.stride = stride\n self.init_weights()\n\n def init_weights(self):\n dw_max = self.kernel_size**-0.5\n pw_max = self.encoder_dim**-0.5\n torch.nn.init.uniform_(self.dw_conv.weight, -dw_max, dw_max)\n torch.nn.init.uniform_(self.dw_conv.bias, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pw_conv.weight, -pw_max, pw_max)\n torch.nn.init.uniform_(self.pw_conv.bias, -pw_max, pw_max)\n\n def forward(\n self,\n xs: torch.Tensor,\n xs_lens: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n xs = xs.masked_fill(mask_pad.transpose(1, 2).eq(0), 0.0)\n xs = xs.unsqueeze(2)\n padding1 = self.kernel_size - self.stride\n xs = F.pad(xs, (0, 0, 0, 0, 0, padding1, 0, 0), mode=\"constant\", value=0.0)\n xs = self.dw_conv(xs.permute(0, 3, 1, 2))\n xs = self.pw_conv(xs).permute(0, 3, 2, 1).squeeze(1).contiguous()\n tmp_length = xs.size(1)\n xs_lens = torch.div(xs_lens + 1, 2, rounding_mode=\"trunc\")\n padding2 = max(0, (xs_lens.max() - tmp_length).data.item())\n batch_size, hidden = xs.size(0), xs.size(-1)\n dummy_pad = torch.zeros(batch_size, padding2, hidden, device=xs.device)\n xs = torch.cat([xs, dummy_pad], dim=1)\n mask = mask[:, ::2, ::2]\n mask_pad = mask_pad[:, :, ::2]\n return xs, xs_lens, mask, mask_pad" }, { "identifier": "TimeReductionLayerStream", "path": "modules/wenet_extractor/squeezeformer/subsampling.py", "snippet": "class TimeReductionLayerStream(nn.Module):\n \"\"\"\n Squeezeformer Time Reduction procedure.\n Downsamples the audio by `stride` in the time dimension.\n Args:\n channel (int): input dimension of\n MultiheadAttentionMechanism and PositionwiseFeedForward\n out_dim (int): Output dimension of the module.\n kernel_size (int): Conv kernel size for\n depthwise convolution in convolution module\n stride (int): Downsampling factor in time dimension.\n 
\"\"\"\n\n def __init__(\n self, channel: int, out_dim: int, kernel_size: int = 1, stride: int = 2\n ):\n super(TimeReductionLayerStream, self).__init__()\n\n self.channel = channel\n self.out_dim = out_dim\n self.kernel_size = kernel_size\n self.stride = stride\n\n self.dw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=channel,\n kernel_size=kernel_size,\n stride=stride,\n padding=0,\n groups=channel,\n )\n\n self.pw_conv = nn.Conv1d(\n in_channels=channel,\n out_channels=out_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n groups=1,\n )\n\n self.init_weights()\n\n def init_weights(self):\n dw_max = self.kernel_size**-0.5\n pw_max = self.channel**-0.5\n torch.nn.init.uniform_(self.dw_conv.weight, -dw_max, dw_max)\n torch.nn.init.uniform_(self.dw_conv.bias, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pw_conv.weight, -pw_max, pw_max)\n torch.nn.init.uniform_(self.pw_conv.bias, -pw_max, pw_max)\n\n def forward(\n self,\n xs,\n xs_lens: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ):\n xs = xs.transpose(1, 2) # [B, C, T]\n xs = xs.masked_fill(mask_pad.eq(0), 0.0)\n\n xs = self.dw_conv(xs)\n xs = self.pw_conv(xs)\n\n xs = xs.transpose(1, 2) # [B, T, C]\n\n B, T, D = xs.size()\n mask = mask[:, :: self.stride, :: self.stride]\n mask_pad = mask_pad[:, :, :: self.stride]\n L = mask_pad.size(-1)\n # For JIT exporting, we remove F.pad operator.\n if L - T < 0:\n xs = xs[:, : L - T, :].contiguous()\n else:\n dummy_pad = torch.zeros(B, L - T, D, device=xs.device)\n xs = torch.cat([xs, dummy_pad], dim=1)\n\n xs_lens = torch.div(xs_lens + 1, 2, rounding_mode=\"trunc\")\n return xs, xs_lens, mask, mask_pad" }, { "identifier": "SqueezeformerEncoderLayer", "path": "modules/wenet_extractor/squeezeformer/encoder_layer.py", "snippet": "class SqueezeformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer module.\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`\n instance can be used as the argument.\n feed_forward1 (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n conv_module (torch.nn.Module): Convolution module instance.\n `ConvlutionModule` instance can be used as the argument.\n feed_forward2 (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool):\n True: use layer_norm before each sub-block.\n False: use layer_norm after each sub-block.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n self_attn: torch.nn.Module,\n feed_forward1: Optional[nn.Module] = None,\n conv_module: Optional[nn.Module] = None,\n feed_forward2: Optional[nn.Module] = None,\n normalize_before: bool = False,\n dropout_rate: float = 0.1,\n concat_after: bool = False,\n ):\n super(SqueezeformerEncoderLayer, self).__init__()\n self.size = size\n self.self_attn = self_attn\n self.layer_norm1 = nn.LayerNorm(size)\n self.ffn1 = feed_forward1\n self.layer_norm2 = nn.LayerNorm(size)\n self.conv_module = conv_module\n self.layer_norm3 = nn.LayerNorm(size)\n self.ffn2 = feed_forward2\n self.layer_norm4 = nn.LayerNorm(size)\n self.normalize_before = normalize_before\n self.dropout = nn.Dropout(dropout_rate)\n self.concat_after = concat_after\n if concat_after:\n self.concat_linear = nn.Linear(size + size, size)\n else:\n 
self.concat_linear = nn.Identity()\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n pos_emb: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n # self attention module\n residual = x\n if self.normalize_before:\n x = self.layer_norm1(x)\n x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb, att_cache)\n if self.concat_after:\n x_concat = torch.cat((x, x_att), dim=-1)\n x = residual + self.concat_linear(x_concat)\n else:\n x = residual + self.dropout(x_att)\n if not self.normalize_before:\n x = self.layer_norm1(x)\n\n # ffn module\n residual = x\n if self.normalize_before:\n x = self.layer_norm2(x)\n x = self.ffn1(x)\n x = residual + self.dropout(x)\n if not self.normalize_before:\n x = self.layer_norm2(x)\n\n # conv module\n new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n residual = x\n if self.normalize_before:\n x = self.layer_norm3(x)\n x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)\n x = residual + self.dropout(x)\n if not self.normalize_before:\n x = self.layer_norm3(x)\n\n # ffn module\n residual = x\n if self.normalize_before:\n x = self.layer_norm4(x)\n x = self.ffn2(x)\n # we do not use dropout here since it is inside feed forward function\n x = residual + self.dropout(x)\n if not self.normalize_before:\n x = self.layer_norm4(x)\n\n return x, mask, new_att_cache, new_cnn_cache" }, { "identifier": "RelPositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class RelPositionalEncoding(PositionalEncoding):\n \"\"\"Relative positional encoding module.\n See : Appendix B in https://arxiv.org/abs/1901.02860\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n \"\"\"\n\n def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(d_model, dropout_rate, max_len, reverse=True)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute positional encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n torch.Tensor: Positional embedding tensor (1, time, `*`).\n \"\"\"\n self.pe = self.pe.to(x.device)\n x = x * self.xscale\n pos_emb = self.position_encoding(offset, x.size(1), False)\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "MultiHeadedAttention", "path": "modules/wenet_extractor/transformer/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head: int, n_feat: int, dropout_rate: float):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super().__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(\n self, query: torch.Tensor, key: torch.Tensor, 
value: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor, size\n (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor, size\n (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor, size\n (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(\n self,\n value: torch.Tensor,\n scores: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> torch.Tensor:\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value, size\n (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score, size\n (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask, size (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be True?\n # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the\n # 1st chunk to ease the onnx export.]\n # 2. pytorch training\n if mask.size(2) > 0: # time2 > 0\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n # For last chunk, time2 might be larger than scores.size(-1)\n mask = mask[:, :, :, : scores.size(-1)] # (batch, 1, *, time2)\n scores = scores.masked_fill(mask, -float(\"inf\"))\n attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be False?\n # 1. onnx(16/-1, -1/-1, 16/0)\n # 2. 
jit (16/-1, -1/-1, 16/0, 16/4)\n else:\n attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n 1.When applying cross attention between decoder and encoder,\n the batch padding mask for input is in (#batch, 1, T) shape.\n 2.When applying self attention of encoder,\n the mask is in (#batch, T, T) shape.\n 3.When applying self attention of decoder,\n the mask is in (#batch, L, L) shape.\n 4.If the different position in decoder see different block\n of the encoder, such as Mocha, the passed in mask could be\n in (#batch, L, T) shape. But there is no such case in current\n Wenet.\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). 
Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "modules/wenet_extractor/squeezeformer/attention.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding.\n Paper: https://arxiv.org/abs/1901.02860\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n \"\"\"\n\n def __init__(\n self,\n n_head,\n n_feat,\n dropout_rate,\n do_rel_shift=False,\n adaptive_scale=False,\n init_weights=False,\n ):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.do_rel_shift = do_rel_shift\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n self.adaptive_scale = adaptive_scale\n self.ada_scale = nn.Parameter(\n torch.ones([1, 1, n_feat]), requires_grad=adaptive_scale\n )\n self.ada_bias = nn.Parameter(\n torch.zeros([1, 1, n_feat]), requires_grad=adaptive_scale\n )\n if init_weights:\n self.init_weights()\n\n def init_weights(self):\n input_max = (self.h * self.d_k) ** -0.5\n torch.nn.init.uniform_(self.linear_q.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_q.bias, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_k.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_k.bias, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_v.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_v.bias, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_pos.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_out.weight, -input_max, input_max)\n torch.nn.init.uniform_(self.linear_out.bias, -input_max, input_max)\n\n def rel_shift(self, x, zero_triu: bool = False):\n \"\"\"Compute relative positinal encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, size).\n zero_triu (bool): If true, return the lower triangular part of\n the matrix.\n Returns:\n torch.Tensor: Output tensor.\n \"\"\"\n\n zero_pad = torch.zeros(\n (x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype\n )\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(x.size()[0], x.size()[1], 
x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)\n\n if zero_triu:\n ones = torch.ones((x.size(2), x.size(3)))\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward_attention(\n self,\n value: torch.Tensor,\n scores: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> torch.Tensor:\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value, size\n (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score, size\n (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask, size (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be True?\n # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the\n # 1st chunk to ease the onnx export.]\n # 2. pytorch training\n if mask.size(2) > 0: # time2 > 0\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n # For last chunk, time2 might be larger than scores.size(-1)\n mask = mask[:, :, :, : scores.size(-1)] # (batch, 1, *, time2)\n scores = scores.masked_fill(mask, -float(\"inf\"))\n # (batch, head, time1, time2)\n attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be False?\n # 1. onnx(16/-1, -1/-1, 16/0)\n # 2. jit (16/-1, -1/-1, 16/0, 16/4)\n else:\n attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): Positional embedding tensor\n (#batch, time2, size).\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n \"\"\"\n if self.adaptive_scale:\n query = self.ada_scale * query + self.ada_bias\n key = self.ada_scale * key + self.ada_bias\n value = self.ada_scale * value + self.ada_bias\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time2)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n # Remove rel_shift since it is useless in speech recognition,\n # and it requires special attention for streaming.\n if self.do_rel_shift:\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "PositionwiseFeedForward", "path": "modules/wenet_extractor/squeezeformer/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n FeedForward are appied on each position of the sequence.\n The output dim is same with the input dim.\n\n Args:\n idim (int): Input dimenstion.\n 
hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n activation (torch.nn.Module): Activation function\n \"\"\"\n\n def __init__(\n self,\n idim: int,\n hidden_units: int,\n dropout_rate: float,\n activation: torch.nn.Module = torch.nn.ReLU(),\n adaptive_scale: bool = False,\n init_weights: bool = False,\n ):\n \"\"\"Construct a PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForward, self).__init__()\n self.idim = idim\n self.hidden_units = hidden_units\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.activation = activation\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.w_2 = torch.nn.Linear(hidden_units, idim)\n self.ada_scale = None\n self.ada_bias = None\n self.adaptive_scale = adaptive_scale\n self.ada_scale = torch.nn.Parameter(\n torch.ones([1, 1, idim]), requires_grad=adaptive_scale\n )\n self.ada_bias = torch.nn.Parameter(\n torch.zeros([1, 1, idim]), requires_grad=adaptive_scale\n )\n if init_weights:\n self.init_weights()\n\n def init_weights(self):\n ffn1_max = self.idim**-0.5\n ffn2_max = self.hidden_units**-0.5\n torch.nn.init.uniform_(self.w_1.weight.data, -ffn1_max, ffn1_max)\n torch.nn.init.uniform_(self.w_1.bias.data, -ffn1_max, ffn1_max)\n torch.nn.init.uniform_(self.w_2.weight.data, -ffn2_max, ffn2_max)\n torch.nn.init.uniform_(self.w_2.bias.data, -ffn2_max, ffn2_max)\n\n def forward(self, xs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward function.\n\n Args:\n xs: input tensor (B, L, D)\n Returns:\n output tensor, (B, L, D)\n \"\"\"\n if self.adaptive_scale:\n xs = self.ada_scale * xs + self.ada_bias\n return self.w_2(self.dropout(self.activation(self.w_1(xs))))" }, { "identifier": "ConvolutionModule", "path": "modules/wenet_extractor/squeezeformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\n \"\"\"ConvolutionModule in Conformer model.\"\"\"\n\n def __init__(\n self,\n channels: int,\n kernel_size: int = 15,\n activation: nn.Module = nn.ReLU(),\n norm: str = \"batch_norm\",\n causal: bool = False,\n bias: bool = True,\n adaptive_scale: bool = False,\n init_weights: bool = False,\n ):\n \"\"\"Construct an ConvolutionModule object.\n Args:\n channels (int): The number of channels of conv layers.\n kernel_size (int): Kernel size of conv layers.\n causal (int): Whether use causal convolution or not\n \"\"\"\n super().__init__()\n self.bias = bias\n self.channels = channels\n self.kernel_size = kernel_size\n self.adaptive_scale = adaptive_scale\n self.ada_scale = torch.nn.Parameter(\n torch.ones([1, 1, channels]), requires_grad=adaptive_scale\n )\n self.ada_bias = torch.nn.Parameter(\n torch.zeros([1, 1, channels]), requires_grad=adaptive_scale\n )\n\n self.pointwise_conv1 = nn.Conv1d(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n # self.lorder is used to distinguish if it's a causal convolution,\n # if self.lorder > 0: it's a causal convolution, the input will be\n # padded with self.lorder frames on the left in forward.\n # else: it's a symmetrical convolution\n if causal:\n padding = 0\n self.lorder = kernel_size - 1\n else:\n # kernel_size should be an odd number for none causal convolution\n assert (kernel_size - 1) % 2 == 0\n padding = (kernel_size - 1) // 2\n self.lorder = 0\n self.depthwise_conv = nn.Conv1d(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=padding,\n groups=channels,\n bias=bias,\n )\n\n assert norm in [\"batch_norm\", \"layer_norm\"]\n if norm == \"batch_norm\":\n self.use_layer_norm = False\n self.norm = 
nn.BatchNorm1d(channels)\n else:\n self.use_layer_norm = True\n self.norm = nn.LayerNorm(channels)\n\n self.pointwise_conv2 = nn.Conv1d(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.activation = activation\n if init_weights:\n self.init_weights()\n\n def init_weights(self):\n pw_max = self.channels**-0.5\n dw_max = self.kernel_size**-0.5\n torch.nn.init.uniform_(self.pointwise_conv1.weight.data, -pw_max, pw_max)\n if self.bias:\n torch.nn.init.uniform_(self.pointwise_conv1.bias.data, -pw_max, pw_max)\n torch.nn.init.uniform_(self.depthwise_conv.weight.data, -dw_max, dw_max)\n if self.bias:\n torch.nn.init.uniform_(self.depthwise_conv.bias.data, -dw_max, dw_max)\n torch.nn.init.uniform_(self.pointwise_conv2.weight.data, -pw_max, pw_max)\n if self.bias:\n torch.nn.init.uniform_(self.pointwise_conv2.bias.data, -pw_max, pw_max)\n\n def forward(\n self,\n x: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n cache: torch.Tensor = torch.zeros((0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute convolution module.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, channels).\n mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),\n (0, 0, 0) means fake mask.\n cache (torch.Tensor): left context cache, it is only\n used in causal convolution (#batch, channels, cache_t),\n (0, 0, 0) meas fake cache.\n Returns:\n torch.Tensor: Output tensor (#batch, time, channels).\n \"\"\"\n if self.adaptive_scale:\n x = self.ada_scale * x + self.ada_bias\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2) # (#batch, channels, time)\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n if self.lorder > 0:\n if cache.size(2) == 0: # cache_t == 0\n x = nn.functional.pad(x, (self.lorder, 0), \"constant\", 0.0)\n else:\n assert cache.size(0) == x.size(0) # equal batch\n assert cache.size(1) == x.size(1) # equal channel\n x = torch.cat((cache, x), dim=2)\n assert x.size(2) > self.lorder\n new_cache = x[:, :, -self.lorder :]\n else:\n # It's better we just return None if no cache is required,\n # However, for JIT export, here we just fake one tensor instead of\n # None.\n new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.activation(self.norm(x))\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.pointwise_conv2(x)\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n return x.transpose(1, 2), new_cache" }, { "identifier": "make_pad_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:\n \"\"\"Make mask tensor containing indices of padded part.\n\n See description of make_non_pad_mask.\n\n Args:\n lengths (torch.Tensor): Batch of lengths (B,).\n Returns:\n torch.Tensor: Mask tensor containing indices of padded part.\n\n Examples:\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n \"\"\"\n batch_size = lengths.size(0)\n max_len = max_len if max_len > 0 else lengths.max().item()\n seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)\n 
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_length_expand = lengths.unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n return mask" }, { "identifier": "add_optional_chunk_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def add_optional_chunk_mask(\n xs: torch.Tensor,\n masks: torch.Tensor,\n use_dynamic_chunk: bool,\n use_dynamic_left_chunk: bool,\n decoding_chunk_size: int,\n static_chunk_size: int,\n num_decoding_left_chunks: int,\n):\n \"\"\"Apply optional mask for encoder.\n\n Args:\n xs (torch.Tensor): padded input, (B, L, D), L for max length\n mask (torch.Tensor): mask for xs, (B, 1, L)\n use_dynamic_chunk (bool): whether to use dynamic chunk or not\n use_dynamic_left_chunk (bool): whether to use dynamic left chunk for\n training.\n decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's\n 0: default for training, use random dynamic chunk.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n static_chunk_size (int): chunk size for static chunk training/decoding\n if it's greater than 0, if use_dynamic_chunk is true,\n this parameter will be ignored\n num_decoding_left_chunks: number of left chunks, this is for decoding,\n the chunk size is decoding_chunk_size.\n >=0: use num_decoding_left_chunks\n <0: use all left chunks\n\n Returns:\n torch.Tensor: chunk mask of the input xs.\n \"\"\"\n # Whether to use chunk mask or not\n if use_dynamic_chunk:\n max_len = xs.size(1)\n if decoding_chunk_size < 0:\n chunk_size = max_len\n num_left_chunks = -1\n elif decoding_chunk_size > 0:\n chunk_size = decoding_chunk_size\n num_left_chunks = num_decoding_left_chunks\n else:\n # chunk size is either [1, 25] or full context(max_len).\n # Since we use 4 times subsampling and allow up to 1s(100 frames)\n # delay, the maximum frame is 100 / 4 = 25.\n chunk_size = torch.randint(1, max_len, (1,)).item()\n num_left_chunks = -1\n if chunk_size > max_len // 2:\n chunk_size = max_len\n else:\n chunk_size = chunk_size % 25 + 1\n if use_dynamic_left_chunk:\n max_left_chunks = (max_len - 1) // chunk_size\n num_left_chunks = torch.randint(0, max_left_chunks, (1,)).item()\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n elif static_chunk_size > 0:\n num_left_chunks = num_decoding_left_chunks\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), static_chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n else:\n chunk_masks = masks\n return chunk_masks" }, { "identifier": "get_activation", "path": "modules/wenet_extractor/utils/common.py", "snippet": "def get_activation(act):\n \"\"\"Return activation function.\"\"\"\n # Lazy load to avoid unused import\n from modules.wenet_extractor.transformer.swish import Swish\n\n activation_funcs = {\n \"hardtanh\": torch.nn.Hardtanh,\n \"tanh\": torch.nn.Tanh,\n \"relu\": torch.nn.ReLU,\n \"selu\": torch.nn.SELU,\n \"swish\": getattr(torch.nn, \"SiLU\", Swish),\n \"gelu\": torch.nn.GELU,\n }\n\n return activation_funcs[act]()" } ]
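The NOTE(xcsong) comments in the attention snippets above rely on the fact that concatenating a zero-length cache along the time axis is a no-op, which is why the ONNX export path can always cat and split. A minimal, self-contained sketch of that property in plain PyTorch (independent of the wenet modules):

```python
import torch

# Fake cache with cache_t == 0, as fed for the first chunk in jit export.
a = torch.ones((1, 2, 0, 4))
# Current chunk's keys (or values): (batch, head, time, d_k).
b = torch.ones((1, 2, 3, 4))

# Concatenating the empty cache along dim=2 leaves b unchanged,
# so the same cat-then-use code path works for chunk 0 and later chunks.
c = torch.cat((a, b), dim=2)
print(torch.equal(b, c))  # True

# Splitting the empty cache in half along the last dim also works:
# both halves are zero-length tensors of identical shape.
d = torch.split(a, 2, dim=-1)
print(torch.equal(d[0], d[1]))  # True
```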
import torch import torch.nn as nn from typing import Tuple, Union, Optional, List from modules.wenet_extractor.squeezeformer.subsampling import ( DepthwiseConv2dSubsampling4, TimeReductionLayer1D, TimeReductionLayer2D, TimeReductionLayerStream, ) from modules.wenet_extractor.squeezeformer.encoder_layer import ( SqueezeformerEncoderLayer, ) from modules.wenet_extractor.transformer.embedding import RelPositionalEncoding from modules.wenet_extractor.transformer.attention import MultiHeadedAttention from modules.wenet_extractor.squeezeformer.attention import ( RelPositionMultiHeadedAttention, ) from modules.wenet_extractor.squeezeformer.positionwise_feed_forward import ( PositionwiseFeedForward, ) from modules.wenet_extractor.squeezeformer.convolution import ConvolutionModule from modules.wenet_extractor.utils.mask import make_pad_mask, add_optional_chunk_mask from modules.wenet_extractor.utils.common import get_activation
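Among these imports, make_pad_mask comes from the utils.mask snippet quoted in the context above. A self-contained sketch of its documented behaviour; the function body is copied from that snippet so the example runs without the wenet_extractor package:

```python
import torch


def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """Return a mask where True marks padded positions (body copied from the snippet)."""
    batch_size = lengths.size(0)
    max_len = max_len if max_len > 0 else lengths.max().item()
    seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_length_expand = lengths.unsqueeze(-1)
    return seq_range_expand >= seq_length_expand


print(make_pad_mask(torch.tensor([5, 3, 2])).int())
# tensor([[0, 0, 0, 0, 0],
#         [0, 0, 0, 1, 1],
#         [0, 0, 1, 1, 1]], dtype=torch.int32)
```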
14,125
# This module is from [WeNet](https://github.com/wenet-e2e/wenet). # ## Citations # ```bibtex # @inproceedings{yao2021wenet, # title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit}, # author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin}, # booktitle={Proc. Interspeech}, # year={2021}, # address={Brno, Czech Republic }, # organization={IEEE} # } # @article{zhang2022wenet, # title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit}, # author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei}, # journal={arXiv preprint arXiv:2203.15455}, # year={2022} # } # class SqueezeformerEncoder(nn.Module): def __init__( self, input_size: int = 80, encoder_dim: int = 256, output_size: int = 256, attention_heads: int = 4, num_blocks: int = 12, reduce_idx: Optional[Union[int, List[int]]] = 5, recover_idx: Optional[Union[int, List[int]]] = 11, feed_forward_expansion_factor: int = 4, dw_stride: bool = False, input_dropout_rate: float = 0.1, pos_enc_layer_type: str = "rel_pos", time_reduction_layer_type: str = "conv1d", do_rel_shift: bool = True, feed_forward_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.1, cnn_module_kernel: int = 31, cnn_norm_type: str = "batch_norm", dropout: float = 0.1, causal: bool = False, adaptive_scale: bool = True, activation_type: str = "swish", init_weights: bool = True, global_cmvn: torch.nn.Module = None, normalize_before: bool = False, use_dynamic_chunk: bool = False, concat_after: bool = False, static_chunk_size: int = 0, use_dynamic_left_chunk: bool = False, ): """Construct SqueezeformerEncoder Args: input_size to use_dynamic_chunk, see in Transformer BaseEncoder. encoder_dim (int): The hidden dimension of encoder layer. output_size (int): The output dimension of final projection layer. attention_heads (int): Num of attention head in attention module. num_blocks (int): Num of encoder layers. reduce_idx Optional[Union[int, List[int]]]: reduce layer index, from 40ms to 80ms per frame. recover_idx Optional[Union[int, List[int]]]: recover layer index, from 80ms to 40ms per frame. feed_forward_expansion_factor (int): Enlarge coefficient of FFN. dw_stride (bool): Whether do depthwise convolution on subsampling module. input_dropout_rate (float): Dropout rate of input projection layer. pos_enc_layer_type (str): Self attention type. time_reduction_layer_type (str): Conv1d or Conv2d reduction layer. do_rel_shift (bool): Whether to do relative shift operation on rel-attention module. cnn_module_kernel (int): Kernel size of CNN module. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. cnn_module_kernel (int): Kernel size of convolution module. adaptive_scale (bool): Whether to use adaptive scale. init_weights (bool): Whether to initialize weights. causal (bool): whether to use causal convolution or not. 
""" super(SqueezeformerEncoder, self).__init__() self.global_cmvn = global_cmvn self.reduce_idx: Optional[Union[int, List[int]]] = ( [reduce_idx] if type(reduce_idx) == int else reduce_idx ) self.recover_idx: Optional[Union[int, List[int]]] = ( [recover_idx] if type(recover_idx) == int else recover_idx ) self.check_ascending_list() if reduce_idx is None: self.time_reduce = None else: if recover_idx is None: self.time_reduce = "normal" # no recovery at the end else: self.time_reduce = "recover" # recovery at the end assert len(self.reduce_idx) == len(self.recover_idx) self.reduce_stride = 2 self._output_size = output_size self.normalize_before = normalize_before self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk self.pos_enc_layer_type = pos_enc_layer_type activation = get_activation(activation_type) # self-attention module definition if pos_enc_layer_type != "rel_pos":
# This module is from [WeNet](https://github.com/wenet-e2e/wenet). # ## Citations # ```bibtex # @inproceedings{yao2021wenet, # title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit}, # author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin}, # booktitle={Proc. Interspeech}, # year={2021}, # address={Brno, Czech Republic }, # organization={IEEE} # } # @article{zhang2022wenet, # title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit}, # author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei}, # journal={arXiv preprint arXiv:2203.15455}, # year={2022} # } # class SqueezeformerEncoder(nn.Module): def __init__( self, input_size: int = 80, encoder_dim: int = 256, output_size: int = 256, attention_heads: int = 4, num_blocks: int = 12, reduce_idx: Optional[Union[int, List[int]]] = 5, recover_idx: Optional[Union[int, List[int]]] = 11, feed_forward_expansion_factor: int = 4, dw_stride: bool = False, input_dropout_rate: float = 0.1, pos_enc_layer_type: str = "rel_pos", time_reduction_layer_type: str = "conv1d", do_rel_shift: bool = True, feed_forward_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.1, cnn_module_kernel: int = 31, cnn_norm_type: str = "batch_norm", dropout: float = 0.1, causal: bool = False, adaptive_scale: bool = True, activation_type: str = "swish", init_weights: bool = True, global_cmvn: torch.nn.Module = None, normalize_before: bool = False, use_dynamic_chunk: bool = False, concat_after: bool = False, static_chunk_size: int = 0, use_dynamic_left_chunk: bool = False, ): """Construct SqueezeformerEncoder Args: input_size to use_dynamic_chunk, see in Transformer BaseEncoder. encoder_dim (int): The hidden dimension of encoder layer. output_size (int): The output dimension of final projection layer. attention_heads (int): Num of attention head in attention module. num_blocks (int): Num of encoder layers. reduce_idx Optional[Union[int, List[int]]]: reduce layer index, from 40ms to 80ms per frame. recover_idx Optional[Union[int, List[int]]]: recover layer index, from 80ms to 40ms per frame. feed_forward_expansion_factor (int): Enlarge coefficient of FFN. dw_stride (bool): Whether do depthwise convolution on subsampling module. input_dropout_rate (float): Dropout rate of input projection layer. pos_enc_layer_type (str): Self attention type. time_reduction_layer_type (str): Conv1d or Conv2d reduction layer. do_rel_shift (bool): Whether to do relative shift operation on rel-attention module. cnn_module_kernel (int): Kernel size of CNN module. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. cnn_module_kernel (int): Kernel size of convolution module. adaptive_scale (bool): Whether to use adaptive scale. init_weights (bool): Whether to initialize weights. causal (bool): whether to use causal convolution or not. 
""" super(SqueezeformerEncoder, self).__init__() self.global_cmvn = global_cmvn self.reduce_idx: Optional[Union[int, List[int]]] = ( [reduce_idx] if type(reduce_idx) == int else reduce_idx ) self.recover_idx: Optional[Union[int, List[int]]] = ( [recover_idx] if type(recover_idx) == int else recover_idx ) self.check_ascending_list() if reduce_idx is None: self.time_reduce = None else: if recover_idx is None: self.time_reduce = "normal" # no recovery at the end else: self.time_reduce = "recover" # recovery at the end assert len(self.reduce_idx) == len(self.recover_idx) self.reduce_stride = 2 self._output_size = output_size self.normalize_before = normalize_before self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk self.pos_enc_layer_type = pos_enc_layer_type activation = get_activation(activation_type) # self-attention module definition if pos_enc_layer_type != "rel_pos":
encoder_selfattn_layer = MultiHeadedAttention
6
2023-11-15 09:19:27+00:00
16k
BobaZooba/xllm
src/xllm/cli/fuse.py
[ { "identifier": "Config", "path": "src/xllm/core/config.py", "snippet": "class Config:\n \"\"\"\n The `Config` class serves as a comprehensive configuration schema for managing various parameters required during\n the setup and execution of experiments relating to language models, such as training, quantization, and\n optimization.\n\n Write more here:\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#config\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#detailed-config-explanation\n\n This dataclass is used to encapsulate and standardize the configuration for a diverse range of tasks including\n dataset preparation, tokenizer and model initialization, training, evaluation, and interactions with remote services\n like the Hugging Face Model Hub.\n\n Attributes in this class cover aspects like model name and path, tokenizer settings, dataset paths, training\n strategies, quantization methods, hardware acceleration, logging, output directories, and more. The class provides\n properties with custom logic to resolve specific configurations and validation checks to ensure the environment is\n appropriately set up before proceeding with the workflow.\n\n Customization and flexibility are core to this class, as it provides reasonable defaults while also allowing for\n detailed and scalable configurations catered to advanced tasks such as leveraging LoRA, FSDP, deepspeed stage\n setups, and applying incremental quantization techniques like GPTQ and bits-and-bytes.\n\n Methods within the class include:\n - `check`: Performs checks across various attributes for compatibility and correctness.\n - Property getters such as `correct_tokenizer_name_or_path`, `lora_target_modules`, `dtype`, `deepspeed`, `fsdp`,\n and `lora_model_name_or_path_for_fusing` to fetch calculated or defaulted values based on attribute settings.\n\n Subclassing can be done to extend or modify the functionality of the `Config` class to address specific experimental\n scenarios or customized workflows. 
It is the central piece for orchestrating experimental setups and is intimately\n integrated with the rest of the codebase that operates on top of these configurations.\n\n Attributes:\n\n General Settings:\n - `experiment_key`: An enumeration key to specify the experiment type.\n - `save_safetensors`: A boolean value to indicate whether to use safe serialization for tensors.\n - `max_shard_size`: The maximum shard size when pushing the model to the HuggingFace Hub.\n - `local_rank`: Local rank for distributed training, used for logging and saving.\n - `use_gradient_checkpointing`: If set to `True`, enables gradient checkpointing to reduce memory usage at\n the cost of a slower backward pass.\n - `trainer_key`: An enumeration key to select the trainer using the trainers_registry.\n - `force_fp32`: Forces loading the model in fp32 precision, if set to `True`.\n - `force_fp16`: Forces loading the model in fp16 precision, if set to `True`.\n - `from_gptq`: Indicates if a GPTQ quantized model is being loaded.\n - `huggingface_hub_token`: Token for uploading models to HuggingFace Hub.\n - `deepspeed_stage`: Predefined DeepSpeed stage for optimization.\n - `deepspeed_config_path`: Path to the DeepSpeed config file.\n - `fsdp_strategy`: The strategy to be used for Fully Sharded Data Parallelism (FSDP).\n - `fsdp_offload`: If set to `True`, offloads weights to CPU when using FSDP to save memory.\n - `seed`: Seed for random number generators to ensure reproducibility.\n - `stabilize`: Converts some model weights to fp32 and others to bf16 for stabilization.\n - `path_to_env_file`: Custom path to the .env file for reading environment variables.\n\n Data Preparation:\n - `prepare_dataset`: Flags whether to prepare the dataset during the \"prepare\" stage.\n\n LoRA Fusing:\n - `lora_hub_model_id`: Name of the LoRA model on the hub for fusion.\n - `lora_model_local_path`: Local path to LoRA model to be fused.\n - `fused_model_local_path`: Local path to save the fused model.\n - `fuse_after_training`: If `True`, will fuse the model post-training.\n\n GPTQ Quantization:\n - `quantization_dataset_id`: Dataset ID for GPTQ quantization.\n - `quantization_max_samples`: Maximum number of samples to use during GPTQ quantization.\n - `quantized_model_path`: Path to save the GPTQ quantized model.\n - `quantized_hub_model_id`: Name of the model at the hub post-GPTQ quantization.\n - `quantized_hub_private_repo`: If set to `True`, creates a private repository for the quantized model.\n\n Dataset Related:\n - `dataset_key`: Key to select the dataset from the datasets_registry.\n - `train_local_path_to_data`: Local path to the training data file.\n - `eval_local_path_to_data`: Local path to the evaluation data file.\n - `shuffle`: If `True`, shuffles the training data.\n - `max_eval_samples`: Maximum number of examples to use for evaluation.\n - `add_eval_to_train_if_no_path`: If `True`, adds evaluation data to training if there's no separate eval path.\n\n Tokenizer Settings:\n - `tokenizer_name_or_path`: Name or path to the tokenizer.\n - `tokenizer_use_fast`: If `True`, uses the fast version of the tokenizer.\n - `tokenizer_padding_side`: Sets padding side to 'right' or 'left'.\n\n Data Collator Settings:\n - `collator_key`: Key to select the collator from the collators_registry.\n - `max_length`: Maximum sequence length for the model.\n\n Model Configuration:\n - `model_name_or_path`: Name or path to the model to be used.\n - `push_to_hub_bos_add_bos_token`: Adds BOS token when uploading tokenization 
configuration to the hub.\n - `use_flash_attention_2`: Flags the use of flash attention 2.\n - `trust_remote_code`: If `True`, trusts remote code from the HuggingFace Hub.\n - `device_map`: Device map for placing model layers on specific devices.\n - `prepare_model_for_kbit_training`: If `True`, prepares the model for k-bit training.\n\n BitsAndBytes Integration:\n - `load_in_8bit`: Load the model in 8-bit mode using bitsandbytes.\n - `load_in_4bit`: Load the model in 4-bit mode using bitsandbytes.\n - `llm_int8_threshold`: Threshold for detecting outliers in the model weights.\n - `llm_int8_has_fp16_weight`: If `True`, the model will have fp16 weights.\n - `bnb_4bit_use_double_quant`: If `True`, a second quantization step is used for 4-bit weights.\n - `bnb_4bit_quant_type`: Specifies the quantization type used for 4-bit weights.\n - `bnb_quantize_after_model_init`: Determines when the quantization should occur.\n\n GPTQ Specific Parameters:\n - `gptq_bits`: Number of bits for GPTQ quantization.\n - `gptq_group_size`: Group size for GPTQ quantization.\n - `gptq_disable_exllama`: If `True`, disables ExLlama kernels during GPTQ quantization.\n\n LoRA Specific Parameters:\n - `apply_lora`: If `True`, applies LoRA to the model.\n - `lora_rank`: LoRA rank to define the size of the LoRA matrices.\n - `lora_alpha`: Multiplication factor for the resulting LoRA matrix.\n - `lora_dropout`: Dropout rate for LoRA.\n - `raw_lora_target_modules`: Comma-separated string of module names to apply LoRA, or 'all' to apply broadly.\n\n Training Arguments:\n - `output_dir`: Path to save training outputs.\n - `per_device_train_batch_size`: Batch size per device for training.\n - `do_eval`: If `True`, performs evaluation.\n - `per_device_eval_batch_size`: Batch size per device for evaluation.\n - `gradient_accumulation_steps`: Number of steps to accumulate gradients for larger effective batch size.\n - `eval_accumulation_steps`: Number of steps to accumulate gradients during evaluation.\n - `eval_delay`: Delay before the first evaluation.\n - `eval_steps`: Number of update steps between evaluations.\n - `warmup_steps`: Number of steps for learning rate warmup.\n - `max_steps`: Maximum number of training steps.\n - `num_train_epochs`: Number of epochs for training.\n - `learning_rate`: Learning rate for the optimizer.\n - `max_grad_norm`: Gradient clipping threshold.\n - `weight_decay`: Coefficient for weight decay regularization.\n - `label_smoothing_factor`: Label smoothing factor.\n - `logging_steps`: Number of steps between logging intermediate results.\n - `save_steps`: Number of training steps between checkpoints and model upload.\n - `save_total_limit`: Maximum number of checkpoints to keep.\n - `optim`: Optimizer name, overwritten by DeepSpeed if used.\n - `push_to_hub`: If `True`, model checkpoints are uploaded to HuggingFace Hub.\n - `hub_model_id`: Name of the model on the HuggingFace Hub.\n - `hub_private_repo`: If `True`, creates a private repository on the HuggingFace Hub.\n\n Weights & Biases Integration:\n - `report_to_wandb`: If `True`, logs metrics to Weights & Biases.\n - `wandb_api_key`: API key for Weights & Biases.\n - `wandb_project`: Project name on Weights & Biases.\n - `wandb_entity`: Entity name (user or organization) on Weights & Biases.\n\n Example of creating a `Config` object:\n ```python\n config = Config(\n model_name_or_path='gpt2',\n dataset_key='my_dataset',\n gradient_accumulation_steps=8,\n max_length=512,\n deepspeed_stage=\"3\",\n )\n ```\n\n Note:\n - Throughout the 
codebase, `Config` instances are passed around to provide a unified source of configurations\n for various components.\n - It is crucial to ensure all required settings are properly set in a `Config` object before it is utilized,\n particularly when overriding defaults or specifying custom configurations.\n \"\"\"\n\n # general\n experiment_key: str = field(\n default=enums.Experiments.base,\n metadata={\"help\": \"Experiment class key\"},\n )\n save_safetensors: bool = field(\n default=True,\n metadata={\n \"help\": \"Use safe serialization (safe tensors) or not\",\n },\n )\n max_shard_size: str = field(\n default=\"10GB\", metadata={\"help\": \"max_shard_size for the model pushing to the HuggingFace Hub\"}\n )\n local_rank: int = field(\n default=0,\n metadata={\n \"help\": \"Local rank for logging and saving. Works only in distributed training\",\n },\n )\n use_gradient_checkpointing: bool = field(\n default=False,\n metadata={\n \"help\": \"If True, use gradient checkpointing to save memory at the expense of slower backward pass\",\n },\n )\n trainer_key: str = field(\n default=enums.Trainers.lm,\n metadata={\n \"help\": \"Key of the trainer for loading from trainers_registry\",\n },\n )\n force_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp32 when model loading\",\n },\n )\n force_fp16: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp16 when model loading\",\n },\n )\n from_gptq: bool = field(\n default=False,\n metadata={\n \"help\": \"If you loadining GPTQ quantized model\",\n },\n )\n huggingface_hub_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"HuggingFace Hub token. You can also set this key using .env file\",\n },\n )\n single_gpu: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Indicates that you are learning on the same GPU. It is necessary to use DeepSpeed on a single GPU\",\n },\n )\n master_port: int = field(\n default=9994,\n metadata={\n \"help\": \"Master port for running DeepSpeed on a single GPU. Modify if RuntimeError: Address already in use\",\n },\n )\n deepspeed_stage: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Predifined DeepSpeed stage\",\n },\n )\n deepspeed_config_path: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Path to DeepSpeed config\",\n },\n )\n fsdp_strategy: str = field(\n default=\"\",\n metadata={\n \"help\": \"FSDP strategy\",\n },\n )\n fsdp_offload: bool = field(\n default=True,\n metadata={\n \"help\": \"Offload weights when using FSDP\",\n },\n )\n seed: int = field(\n default=42,\n metadata={\n \"help\": \"Seed value for random operations\",\n },\n )\n stabilize: bool = field(\n default=False,\n metadata={\n \"help\": \"Stabilize the model. Convert some weights (e.g. LoRA) to bf16\",\n },\n )\n norm_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Convert norm to fp32\",\n },\n )\n path_to_env_file: Optional[str] = field(\n default=\"./.env\",\n metadata={\"help\": \"Custom path to .env file\"},\n )\n\n # prepare\n prepare_dataset: bool = field(\n default=True,\n metadata={\n \"help\": 'Prepare the dataset. Works only at \"prepare\" stage',\n },\n )\n\n # fuse\n lora_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. The name of the LoRA model at the hub for fusing. Example: BobaZooba/Shurale\",\n },\n )\n lora_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. 
Local path to the LoRA model\",\n },\n )\n fused_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Local path to fused model. Useful if you want to quantize model after fusing on the same machine\",\n },\n )\n fuse_after_training: bool = field(\n default=False,\n metadata={\n \"help\": \"Fuse or not model after training\",\n },\n )\n\n # gptq quantization\n quantization_dataset_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Dataset id for GPTQ quantization. You can install either the idi dataset, or use any dataset\",\n },\n )\n quantization_max_samples: int = field(\n default=1024,\n metadata={\n \"help\": \"Max samples for GPTQ quantization if you use custom dataset\",\n },\n )\n quantized_model_path: str = field(\n default=\"./quantized_model/\",\n metadata={\n \"help\": \"Path to GPTQ quantized model\",\n },\n )\n quantized_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub for GPTQ quantization. Example: BobaZooba/Shurale-GPTQ\",\n },\n )\n quantized_hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository for GPTQ quantization model or not\",\n },\n )\n\n # dataset\n dataset_key: str = field(\n default=enums.Datasets.soda,\n metadata={\n \"help\": \"Key of the dataset for loading from datasets_registry\",\n },\n )\n train_local_path_to_data: str = field(\n default=\"./train.jsonl\",\n metadata={\n \"help\": \"The path to the local training data file\",\n },\n )\n eval_local_path_to_data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The path to the local eval data file\",\n },\n )\n shuffle: bool = field(\n default=True,\n metadata={\n \"help\": \"Shuffle training data\",\n },\n )\n max_eval_samples: int = field(\n default=1_000,\n metadata={\n \"help\": \"Maximum number of examples for evaluation\",\n },\n )\n add_eval_to_train_if_no_path: bool = field(\n default=False,\n metadata={\n \"help\": \"Add evaluation data to training data if their number is greater than max_eval_samples\",\n },\n )\n\n # tokenizer\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Tokenizer name or path. If the value is not set, \"\n \"then it will be taken from the model_name_or_path\",\n },\n )\n tokenizer_use_fast: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Use fast flag for the tokenizer\",\n },\n )\n tokenizer_padding_side: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Padding side of the collator: None, right or left\",\n },\n )\n\n # collator\n collator_key: str = field(\n default=enums.Collators.lm,\n metadata={\n \"help\": \"Key of the collator for loading from collators_registry\",\n },\n )\n max_length: int = field(\n default=2048,\n metadata={\n \"help\": \"Max sequence length of the model\",\n },\n )\n\n # model\n model_name_or_path: str = field(\n default=\"mistralai/Mistral-7B-v0.1\",\n metadata={\n \"help\": \"Model name or path. It could be from HuggingFace or locally\",\n },\n )\n push_to_hub_bos_add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload to the hub tokenization config with add_bos_token equals to True. Might be helpful for TGI\"\n },\n )\n use_flash_attention_2: bool = field(\n default=False,\n metadata={\n \"help\": \"Use or not flash attention 2. 
Requires 1) CUDA >= 11.6; 2) install flash-attn 3) compatible model\",\n },\n )\n trust_remote_code: bool = field(\n default=False,\n metadata={\n \"help\": \"Trust remote code from HuggingFace\",\n },\n )\n device_map: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Device map for loading the model\",\n },\n )\n prepare_model_for_kbit_training: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Prepare or not for kbit training\",\n },\n )\n offload_folder: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Offloading folder. Helps for fusing in colab\",\n },\n )\n\n # bitsandbytes\n load_in_8bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 8 bit using bitsandbytes\",\n },\n )\n load_in_4bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 4 bit using bitsandbytes\",\n },\n )\n llm_int8_threshold: float = field(\n default=6.0,\n metadata={\n \"help\": \"Threshold for outlier detection\",\n },\n )\n llm_int8_has_fp16_weight: bool = field(\n default=True,\n metadata={\n \"help\": \"LLM has weights in fp16\",\n },\n )\n bnb_4bit_use_double_quant: bool = field(\n default=True,\n metadata={\n \"help\": \"Double quantization. \"\n \"This will enable a second quantization after the first \"\n \"one to save an additional 0.4 bits per parameter\",\n },\n )\n bnb_4bit_quant_type: str = field(\n default=\"nf4\",\n metadata={\n \"help\": \"Quantization type for 4 bit\",\n },\n )\n bnb_quantize_after_model_init: bool = field(\n default=False, metadata={\"help\": \"If False, quantization will be at model init\"}\n )\n\n # gptq\n gptq_bits: int = field(\n default=4,\n metadata={\n \"help\": \"Bits for GPTQ quantization\",\n },\n )\n gptq_group_size: int = field(\n default=128,\n metadata={\n \"help\": \"Group size for GPTQ quantization\",\n },\n )\n gptq_disable_exllama: bool = field(\n default=True,\n metadata={\n \"help\": \"Disable ExLlama kernels for GPTQ quantization\",\n },\n )\n\n # lora\n apply_lora: bool = field(\n default=False,\n metadata={\n \"help\": \"Apply LoRA to the model or not\",\n },\n )\n lora_rank: int = field(\n default=8,\n metadata={\n \"help\": \"LoRA rank value. LoRA matrices W_A x R and R x W_B, where R is LoRA rank\",\n },\n )\n lora_alpha: int = field(\n default=32,\n metadata={\n \"help\": \"LoRA alpha value. The resulting LoRA matrix will be multiplied by this value\",\n },\n )\n lora_dropout: float = field(\n default=0.1,\n metadata={\n \"help\": \"LoRA dropout value\",\n },\n )\n raw_lora_target_modules: str = field(\n default=\"all\",\n metadata={\n \"help\": 'Names of modules to apply LoRA. A comma-separated string, for example: \"k,q,v\". '\n 'When setting the value \"all\", LoRA will be applied to all linear layers, except for the '\n \"input embeddings and the lm_head.\",\n },\n )\n\n # training arguments\n output_dir: str = field(\n default=\"./outputs/\",\n metadata={\n \"help\": \"The path to the directory where the artifacts will be saved\",\n },\n )\n per_device_train_batch_size: int = field(\n default=2,\n metadata={\n \"help\": \"Batch size on each GPU\",\n },\n )\n do_eval: bool = field(\n default=False,\n metadata={\n \"help\": \"Run eval or not\",\n },\n )\n per_device_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Batch size on each GPU for evaluation. 
\"\n \"If None per_device_eval_batch_size equals to per_device_train_batch_size\",\n },\n )\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\n \"help\": \"Number of steps to accumulate gradients\",\n },\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of steps to accumulate gradients at evaluation.\"\n \"If None eval_accumulation_steps equals to gradient_accumulation_steps\",\n },\n )\n eval_delay: int = field(\n default=0,\n metadata={\n \"help\": \"Number of epochs or steps to wait for before the first \"\n \"evaluation can be performed, depending on the evaluation_strategy\"\n },\n )\n eval_steps: Optional[int] = field(\n default=1_000, metadata={\"help\": \"Number of update steps between two evaluations\"}\n )\n warmup_steps: int = field(\n default=1_000,\n metadata={\n \"help\": \"Number of steps to warm up\",\n },\n )\n max_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Maximum number of training steps\",\n },\n )\n num_train_epochs: int = field(\n default=1,\n metadata={\n \"help\": \"Number of training epochs\",\n },\n )\n learning_rate: float = field(\n default=2e-4,\n metadata={\n \"help\": \"Learning rate value\",\n },\n )\n max_grad_norm: float = field(\n default=1.0,\n metadata={\n \"help\": \"Clip grad value\",\n },\n )\n weight_decay: float = field(\n default=0.001,\n metadata={\n \"help\": \"Weight decay value\",\n },\n )\n label_smoothing_factor: float = field(\n default=0.0,\n metadata={\n \"help\": \"Label smoothing value\",\n },\n )\n logging_steps: int = field(\n default=10,\n metadata={\n \"help\": \"Number of steps between logging\",\n },\n )\n save_steps: int = field(\n default=100,\n metadata={\n \"help\": \"The number of training steps between saving the checkpoint and uploading to the hub\",\n },\n )\n save_total_limit: int = field(\n default=1,\n metadata={\n \"help\": \"The number of checkpoints that are saved locally\",\n },\n )\n optim: Optional[str] = field(\n default=\"paged_adamw_8bit\",\n metadata={\n \"help\": \"Optimizer name. It will be overwritten if you use deepspeed\",\n },\n )\n push_to_hub: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload the model to the hub. \"\n \"The model will be uploaded to the hub every save_steps. \"\n \"If LoRA is used, then LoRA's weights will be loaded onto the hub\",\n },\n )\n hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub. Example: BobaZooba/Shurale\",\n },\n )\n hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository or not\",\n },\n )\n neftune_noise_alpha: Optional[float] = field(\n default=None,\n metadata={\n \"help\": \"If not None, this will activate NEFTune noise embeddings. \"\n \"This can drastically improve model performance for instruction fine-tuning\",\n },\n )\n\n # training traction\n project_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Project name for training traction services like W&B\",\n },\n )\n report_to_wandb: bool = field(\n default=False,\n metadata={\n \"help\": \"Report or not to Weight & Biases\",\n },\n )\n wandb_api_key: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases API key. You can also set this key using .env file\",\n },\n )\n wandb_project: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Depreacted, use project_name. 
Weight & Biases project name\",\n },\n )\n wandb_entity: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases entity name (user or company)\",\n },\n )\n\n def __post_init__(self):\n if self.huggingface_hub_token is not None:\n os.environ[enums.EnvironmentVariables.huggingface_hub_token] = self.huggingface_hub_token\n dist_logger(message=f\"Environment variable {enums.EnvironmentVariables.huggingface_hub_token} set\")\n\n if self.report_to_wandb:\n for key, value in zip(\n [\n enums.EnvironmentVariables.wandb_api_key,\n enums.EnvironmentVariables.wandb_project,\n enums.EnvironmentVariables.wandb_entity,\n ],\n [\n self.wandb_api_key,\n self.correct_project_name,\n self.wandb_entity,\n ],\n ):\n if value is not None:\n os.environ[key] = value\n dist_logger(message=f\"Environment variable {key} set\")\n else:\n os.environ[enums.EnvironmentVariables.wandb_disabled] = \"true\"\n\n @property\n def correct_project_name(self) -> Optional[str]:\n if self.project_name is not None and self.wandb_project is not None:\n dist_logger.warning(\n message=\"You set both project_name and wandb_project.\"\n \"Priority set to project_name for experiment tracking\"\n )\n return self.project_name\n elif self.project_name is not None:\n return self.project_name\n elif self.wandb_project is not None:\n dist_logger.warning(message=\"wandb_project is depreacted, please use project_name instead\")\n return self.wandb_project\n else:\n return None\n\n def check_hub(self) -> None:\n if self.push_to_hub and self.hub_model_id is None:\n raise ValueError(\"You want to push to HF hub, but hub_model_id is None\")\n elif self.hub_model_id is not None and not self.push_to_hub:\n dist_logger.warning(\"You set hub_model_id, but push_to_hub is False\")\n\n return None\n\n def apply_deepspeed_single_gpu(self) -> None:\n os.environ[enums.EnvironmentVariables.master_address] = \"localhost\"\n os.environ[enums.EnvironmentVariables.master_port] = str(self.master_port)\n os.environ[enums.EnvironmentVariables.rank] = \"0\"\n os.environ[enums.EnvironmentVariables.local_rank] = \"0\"\n os.environ[enums.EnvironmentVariables.world_size] = \"1\"\n\n def check_deepspeed(self) -> None:\n if self.deepspeed is not None:\n spec = find_spec(\"deepspeed\")\n\n if spec is None:\n raise ImportError(\"Deepspeed is not None, but failed to import deepspeed. Please install deepspeed.\")\n\n if self.single_gpu:\n self.apply_deepspeed_single_gpu()\n\n return None\n\n def check_flash_attention(self) -> None:\n if self.use_flash_attention_2:\n if not torch.cuda.is_available():\n raise ImportError(\"You want to use flash_attention_2, but CUDA is not available\")\n\n spec = find_spec(\"flash_attn\")\n\n if spec is None:\n raise ImportError(\n \"You want to use flash_attention_2, but flash-attn is not installed. Please install flash-attn.\"\n )\n\n return None\n\n def check_auto_gptq(self) -> None:\n spec = find_spec(\"auto_gptq\")\n\n if spec is None:\n raise ImportError(\n \"You want to quantize model using GPTQ, but auto-gptq is not installed. 
Please install auto-gptq.\"\n )\n\n return None\n\n def check(self) -> None:\n \"\"\"\n Performs a series of checks to validate the configuration for compatibility with the training environment.\n\n This method is responsible for ensuring that the environment is properly set up for the actions specified in\n the configuration object, such as pushing to Hugging Face's hub, using deepspeed, and using flash attention.\n\n It includes the following checks:\n - Verifies that credentials for Hugging Face hub are provided if the model is intended to be pushed to the hub.\n - Validates that deepspeed is installed if it is specified in the configuration.\n - Ensures that the necessary packages are installed for using flash attention if configured to do so.\n\n Does not return any value.\n\n Raises:\n - ValueError: If the configuration for hub interaction is incorrect.\n - ImportError: If any of the required libraries (e.g., deepspeed, flash-attn, auto-gptq) are not installed.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(...)\n # Before proceeding with training or other operations, run checks to ensure environment compatibility.\n config.check()\n ```\n\n Note:\n - Always invoke this method after initializing a `Config` object and before proceeding with model training\n or other operations that rely on the configuration settings.\n \"\"\"\n self.check_hub()\n self.check_deepspeed()\n self.check_flash_attention()\n\n return None\n\n @property\n def correct_tokenizer_name_or_path(self) -> str:\n \"\"\"\n Resolves the tokenizer name or path to be used for initializing the tokenizer.\n\n This property ensures that if a specific tokenizer name or path is not provided in the configuration object,\n the model name or path is used instead, maintaining consistency between model and tokenizer.\n\n Returns:\n `str`: The name or path of the tokenizer to be used. If `tokenizer_name_or_path` is specified in `Config`\n object, that value is used. Otherwise, `model_name_or_path` is returned as the default tokenizer identifier.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(model_name_or_path=\"gpt2\", tokenizer_name_or_path=None)\n tokenizer_name_or_path = config.correct_tokenizer_name_or_path\n # tokenizer_name_or_path now holds the value \"gpt2\"\n ```\n\n Note:\n - It is a common practice to use the same identifier for both the model and its corresponding tokenizer.\n This property handles such a case automatically when the `tokenizer_name_or_path` is not explicitly set.\n \"\"\"\n if self.tokenizer_name_or_path is not None:\n return self.tokenizer_name_or_path\n else:\n return self.model_name_or_path\n\n @property\n def lora_target_modules(self) -> Optional[List[str]]:\n \"\"\"\n Interprets the LoRA target modules setting from the configuration to determine which model modules to apply\n LoRA to.\n\n LoRA (Low-Rank Adaptation) is a parameter-efficient training method that modifies specific layers within a\n model. 
This property is responsible for parsing the `raw_lora_target_modules` configuration to identify\n the specific modules (like attention key, query, and value matrices) that LoRA will be applied to.\n\n Returns:\n Optional[List[str]]: A list of module names to apply LoRA to if specified, otherwise `None` if LoRA should\n be applied to all eligible modules as determined by the string \"all\" in `raw_lora_target_modules`.\n\n Raises:\n ValueError: If `raw_lora_target_modules` is not set.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with LoRA targets specified.\n config = Config(raw_lora_target_modules=\"k,q,v\")\n lora_modules = config.lora_target_modules\n # lora_modules now holds the list ['k', 'q', 'v'].\n ```\n\n Note:\n - The `raw_lora_target_modules` should be provided as a comma-separated string specifying the target\n modules. If LoRA should be applied broadly, the value \"all\" can be used.\n \"\"\"\n if self.raw_lora_target_modules == \"all\":\n return None\n elif self.raw_lora_target_modules is not None:\n modules_names = [module_name.strip() for module_name in self.raw_lora_target_modules.split(\",\")]\n return modules_names\n else:\n raise ValueError(\"raw_lora_target_modules doesn't set\")\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Determines the appropriate PyTorch data type for the model based on availability of CUDA and configuration\n settings.\n\n This property assists in setting computational precision for training and inference (e.g., FP32, FP16, BF16),\n basing the decision on system capabilities and user preferences as defined in the `Config` object. The selected\n data type can impact both the computational efficiency and memory usage of the model operations.\n\n Returns:\n `torch.dtype`: The data type to be used for the model tensors. This can be one of the following based on the\n system's CUDA support and configuration flags: `torch.float32` (FP32), `torch.float16` (FP16), or\n `torch.bfloat16` (BF16).\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(force_fp32=False, force_fp16=True)\n model_dtype = config.dtype\n # If CUDA is available and BF16 is supported, model_dtype will be `torch.bfloat16`.\n # Otherwise, it falls back to `torch.float16` due to the forced FP16 configuration.\n ```\n\n Note:\n - This property plays a critical role in memory management and computational efficiency, especially when\n working with large models or limited system resources.\n \"\"\"\n if not torch.cuda.is_available() or self.force_fp32:\n return torch.float32\n elif self.force_fp16:\n return torch.float16\n elif torch.cuda.is_bf16_supported():\n return torch.bfloat16\n else:\n return torch.float16\n\n @property\n def deepspeed(self) -> Optional[Dict[str, Any]]:\n \"\"\"\n Retrieves the deepspeed configuration dictionary based on settings within the `Config` object.\n\n This property parses the deepspeed settings from the configuration to construct the configuration dictionary\n used for ing up deepspeed in the model's training environment. 
It determines whether a predefined stage\n or a custom configuration file path should be utilized.\n\n Returns:\n `Optional[Dict[str, Any]]`: A dictionary containing deepspeed configurations, or `None` if deepspeed is not\n to be used.\n\n Raises:\n ValueError: If the `deepspeed_stage` specified does not correspond to a known configuration,\n or if a custom deepspeed configuration file path does not exist.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with deepspeed specifications.\n config = Config(deepspeed_stage=\"2\")\n ds_config = config.deepspeed\n # ds_config now contains the deepspeed configuration for stage 2.\n ```\n\n Note:\n - A deepspeed stage is a set of predefined configurations. If this is set, the corresponding configuration\n will be used and any custom deepspeed configuration file will be ignored.\n - If a custom deepspeed configuration file path is given and it exists, that configuration will be loaded\n and used.\n \"\"\"\n deepspeed_config: Optional[Dict[str, Any]] = None\n\n if self.deepspeed_config_path is not None:\n if os.path.isfile(self.deepspeed_config_path):\n with open(self.deepspeed_config_path) as file_object:\n deepspeed_config = json.load(file_object)\n return deepspeed_config\n else:\n raise ValueError(f\"deepspeed_config_path set to {self.deepspeed_config_path}, but not found\")\n\n if self.deepspeed_stage in [0, \"0\", \"stage_0\"]:\n return None\n\n if self.deepspeed_stage is not None:\n deepspeed_config = DS_CONFIG_MAPPER.get(self.deepspeed_stage, None)\n if deepspeed_config is None:\n raise ValueError(\n f'Deepspeed stage \"{self.deepspeed_stage}\" not found in keys: {list(DS_CONFIG_MAPPER.keys())}'\n )\n\n return deepspeed_config\n\n @property\n def fsdp(self) -> Union[str, List[str]]:\n \"\"\"\n Compiles the configurations for Fully Sharded Data Parallel (FSDP) based on the settings in the `Config` object.\n\n This property creates a list containing FSDP-related options, which informs the training process whether to\n enable FSDP and which FSDP strategy to employ.\n\n A list of options (fsdp_strategy) along the following:\n \"full_shard\": Shard parameters, gradients and optimizer states.\n \"shard_grad_op\": Shard optimizer states and gradients.\n \"offload\": Offload parameters and gradients to CPUs (only compatible with \"full_shard\" and \"shard_grad_op\").\n \"auto_wrap\": Automatically recursively wrap layers with FSDP using default_auto_wrap_policy.\n\n Returns:\n `Union[str, List[str]]`: A list of FSDP options as strings. 
It can be an empty string if FSDP is not used or\n a list with the specified FSDP strategy and options such as offloading.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with FSDP specifications.\n config = Config(fsdp_strategy=\"full_shard\", fsdp_offload=True)\n fsdp_options = config.fsdp\n ```\n\n Note:\n - FSDP strategies and options improve memory efficiency during distributed training by sharding the model's\n parameters across multiple devices.\n - The FSDP settings in the configuration should match the target training environment and system\n capabilities.\n \"\"\"\n fsdp_options = list()\n\n if self.fsdp_strategy is not None and self.fsdp_strategy != \"\":\n fsdp_options.append(self.fsdp_strategy)\n else:\n return \"\"\n\n if self.fsdp_offload:\n fsdp_options.append(FSDPOption.OFFLOAD)\n\n return fsdp_options\n\n @property\n def lora_model_name_or_path_for_fusing(self) -> str:\n \"\"\"\n Determines the name or path of the LoRA model to be used for the fusing process.\n\n This property resolves which model should be fused by checking whether a model ID from the Hugging Face hub or a\n local path to a LoRA model is provided in the configuration object. It is essential for the fusing operation\n when LoRA weights need to be integrated into the base model.\n\n Returns:\n `str`: The Hugging Face hub model ID or the local file path to the LoRA model, depending on which is\n specified.\n\n Raises:\n ValueError: If neither `lora_hub_model_id` nor `lora_model_local_path` is set, indicating that there is no\n model specified for fusing.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with a specified LoRA model on Hugging Face Hub or locally.\n config = Config(lora_hub_model_id=\"username/model-id\", lora_model_local_path=None)\n model_name_or_path = config.lora_model_name_or_path_for_fusing\n # model_name_or_path will hold the value \"username/model-id\".\n ```\n\n Note:\n - This property is specifically used during the model fusing step and should be configured correctly in\n scenarios where LoRA is utilized.\n \"\"\"\n if self.lora_hub_model_id is not None:\n return self.lora_hub_model_id\n elif self.lora_model_local_path is not None:\n return self.lora_model_local_path\n else:\n raise ValueError(\"Please set lora_hub_model_id or lora_model_local_path for fusing\")\n\n @property\n def need_to_prepare_model_for_kbit_training(self) -> bool:\n if self.prepare_model_for_kbit_training is not None:\n return self.prepare_model_for_kbit_training\n else:\n return self.from_gptq or self.load_in_4bit or self.load_in_8bit" }, { "identifier": "fuse", "path": "src/xllm/run/fuse.py", "snippet": "def fuse(config: Config) -> Tuple[PreTrainedTokenizer, PreTrainedModel]:\n \"\"\"\n Performs the model parameter fusion step for models that use LoRA (Low-Rank Adaptation) during training.\n\n This function specifically deals with models that were trained with the LoRA technique, where additional\n learnable parameters were introduced during training for adapting the pre-existing weights. 
The fusing process\n integrates these parameters into the main model weights, effectively finalizing the model before deployment or\n further usage.\n\n Args:\n config (`Config`):\n The configuration object that holds settings and parameters for the fusing process, including\n the model and LoRA weights paths locally or at Huggingface Hub.\n\n Returns:\n Tuple[PreTrainedTokenizer, PreTrainedModel]:\n A tuple containing the tokenizer and the fused model after the LoRA parameters have been integrated.\n\n During its execution, the `fuse` function calls the `fuse_lora` utility, which handles the intricacies of the\n LoRA fusion process based on the provided configuration. After successful fusion, it logs a message to indicate\n that the process is complete.\n\n Example usage:\n ```python\n from some_module.config import Config\n\n # Assuming we have a predefined Config object for a model trained with LoRA.\n config = Config(...)\n tokenizer, fused_model = fuse(config=config)\n\n # `tokenizer` and `fused_model` can now be used for inference or further steps following the fusion.\n ```\n\n Note:\n LoRA fusing is a critical step for models that were trained using the LoRA technique. It should be done prior\n to using such models for inference, as it ensures the trained adaptations are correctly reflected in the model's\n behavior.\n \"\"\"\n tokenizer, model = fuse_lora(config=config)\n logger.info(\"Fusing complete\")\n\n return tokenizer, model" }, { "identifier": "setup_cli", "path": "src/xllm/utils/cli.py", "snippet": "def setup_cli(config: Config, logger_path: str = \"xllm.log\", rotation: str = \"5 MB\") -> None:\n \"\"\"\n Sets up the command-line interface (CLI) environment for language model training and evaluation\n by initializing the logger, loading environment variables, and setting global configuration options\n for tokenization and seeding.\n\n Args:\n config (`Config`):\n The experiment's configuration object that contains necessary parameters,\n including the path to a `.env` file, seed value for reproducibility, and settings related\n to Weights & Biases (wandb) reporting.\n logger_path (`str`, defaults to \"xllm.log\"):\n The file path where the log records will be stored.\n rotation (`str`, defaults to \"5 MB\"):\n The policy that determines when a new log file is started. 
It could be a size limit (like \"5 MB\"),\n a time period, or a condition.\n\n This function performs several key setup steps:\n\n - Initializes the file logger with the specified `logger_path` and `rotation` policy, which manages\n log file rotation based on the file size limit or other criteria.\n - Loads environment variables from the `.env` file specified by the `config.path_to_env_file` attribute.\n This step is crucial for retrieving sensitive information, which should not be hardcoded in the code,\n such as API keys.\n - Sets tokenization-related environment variables to avoid parallelism-related warnings or issues during\n tokenization processes.\n - Checks and issues warnings if API keys for Weights & Biases or HuggingFace Hub are not found\n in the environment variables, which are essential for model reporting and uploading.\n - Seeds the random number generators for libraries like Transformers to ensure reproducibility across runs.\n - Sets the logging verbosity level for the Transformers library to suppress unnecessary messages during execution.\n\n The `setup_cli` function is typically called at the start of a training or evaluation run to ensure that\n the environment is correctly configured and that all requisite external dependencies are in place and\n properly initialized for the rest of the experiment's execution.\n \"\"\"\n\n logger.add(logger_path, rotation=rotation)\n load_dotenv(dotenv_path=config.path_to_env_file)\n logger.info(\".env loaded\")\n\n os.environ[enums.EnvironmentVariables.tokenizers_parallelism] = \"false\"\n\n if config.report_to_wandb and enums.EnvironmentVariables.wandb_api_key not in os.environ:\n logger.warning(\"W&B token not found in env vars\")\n\n if enums.EnvironmentVariables.huggingface_hub_token not in os.environ:\n logger.warning(\"HuggingFaceHub token not found in env vars\")\n\n transformers.set_seed(seed=config.seed)\n transformers.logging.set_verbosity_error()\n logger.info(f'Logger path \"{logger_path}\" with rotation \"{rotation}\"')\n\n return None" } ]
from typing import Tuple, Type

from transformers import HfArgumentParser, PreTrainedModel, PreTrainedTokenizer

from ..core.config import Config
from ..run.fuse import fuse
from ..utils.cli import setup_cli
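With these imports, the same fusing flow can also be driven from Python rather than through the CLI wrapper shown in this record. A hedged sketch follows; the absolute import paths and the concrete `Config` field values are assumptions chosen to match the snippets above, not verified package details.

```python
# Hedged sketch of a programmatic fuse run; import paths and field values are assumptions.
from xllm import Config
from xllm.run.fuse import fuse          # assumed absolute path for ..run.fuse
from xllm.utils.cli import setup_cli    # assumed absolute path for ..utils.cli

config = Config(
    model_name_or_path="base-model-id",         # placeholder
    lora_hub_model_id="username/lora-adapter",  # placeholder
)
setup_cli(config=config, logger_path="./xllm_fuse.log")
tokenizer, model = fuse(config=config)
```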
12,316
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def cli_run_fuse( config_cls: Type[Config] = Config, ) -> Tuple[PreTrainedTokenizer, PreTrainedModel]: """ Provides a command-line interface (CLI) entry point for fusing LoRA parameters into the model after training. This function serves as a script for fusing parameters using the LoRA technique by parsing command-line arguments to configure the process and then invoking the `fuse` function. It also manages CLI-related configurations, including setting up logging for the process. Args: config_cls (Type[Config], defaults to `Config`): The configuration class type to be used for parsing the command-line arguments into a configuration object. This class should define the necessary parameters for the fusing process. Returns: Tuple[PreTrainedTokenizer, PreTrainedModel]: A tuple containing the tokenizer and the LoRA-fused model. The function performs the following steps: - Initializes an `HfArgumentParser` object with `config_cls` to handle command-line arguments. - Parses the arguments into a configuration object. - Sets up CLI interactions including logging to a file (default is `./xllm_fuse.log`). - Calls the `fuse` function with the parsed configuration object to begin the fusing process. - Returns the tokenizer and the model that have been processed. When the script is executed directly from the command line, it will run the following as part of the main program: - Parse the command-line arguments into a `Config` object. - Fuse the LoRA parameters in the trained model while logging the output to `xllm_fuse.log`. - Return the tokenizer and LoRA-fused model. Example CLI usage: ```sh python cli_run_fuse.py --model_name_or_path my_model ``` Note: This function is particularly meant to be used when working with models that were trained with the LoRA technique. It is intended to be used as part of a CLI workflow and should be executed directly from the terminal or within a script. """ parser = HfArgumentParser(config_cls) config = parser.parse_args_into_dataclasses()[0] setup_cli(config=config, logger_path="./xllm_fuse.log")
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def cli_run_fuse( config_cls: Type[Config] = Config, ) -> Tuple[PreTrainedTokenizer, PreTrainedModel]: """ Provides a command-line interface (CLI) entry point for fusing LoRA parameters into the model after training. This function serves as a script for fusing parameters using the LoRA technique by parsing command-line arguments to configure the process and then invoking the `fuse` function. It also manages CLI-related configurations, including setting up logging for the process. Args: config_cls (Type[Config], defaults to `Config`): The configuration class type to be used for parsing the command-line arguments into a configuration object. This class should define the necessary parameters for the fusing process. Returns: Tuple[PreTrainedTokenizer, PreTrainedModel]: A tuple containing the tokenizer and the LoRA-fused model. The function performs the following steps: - Initializes an `HfArgumentParser` object with `config_cls` to handle command-line arguments. - Parses the arguments into a configuration object. - Sets up CLI interactions including logging to a file (default is `./xllm_fuse.log`). - Calls the `fuse` function with the parsed configuration object to begin the fusing process. - Returns the tokenizer and the model that have been processed. When the script is executed directly from the command line, it will run the following as part of the main program: - Parse the command-line arguments into a `Config` object. - Fuse the LoRA parameters in the trained model while logging the output to `xllm_fuse.log`. - Return the tokenizer and LoRA-fused model. Example CLI usage: ```sh python cli_run_fuse.py --model_name_or_path my_model ``` Note: This function is particularly meant to be used when working with models that were trained with the LoRA technique. It is intended to be used as part of a CLI workflow and should be executed directly from the terminal or within a script. """ parser = HfArgumentParser(config_cls) config = parser.parse_args_into_dataclasses()[0] setup_cli(config=config, logger_path="./xllm_fuse.log")
tokenizer, model = fuse(config=config)
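The record's next line hands back the `(tokenizer, model)` pair from `fuse`. A typical follow-up, not shown in the record itself, is to persist the fused weights; the sketch below uses only stock `transformers` save APIs, and the output directory is a placeholder.

```python
# Hedged follow-up sketch: persist a fused (tokenizer, model) pair with stock transformers APIs.
from transformers import PreTrainedModel, PreTrainedTokenizer


def save_fused(
    tokenizer: PreTrainedTokenizer,
    model: PreTrainedModel,
    output_dir: str = "./fused-model",  # placeholder path
) -> None:
    """Write the tokenizer files and fused weights to output_dir."""
    tokenizer.save_pretrained(output_dir)
    model.save_pretrained(output_dir, safe_serialization=True)
```

Publishing the result afterwards (for example with `model.push_to_hub(...)`) would mirror the `push_to_hub` / `hub_model_id` checks performed by `Config.check_hub` above.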
1
2023-11-10 17:55:03+00:00
16k
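The `deepspeed` property documented in the `Config` snippet above resolves its configuration in three steps: an explicit JSON file wins, stage 0 (or an unset stage) disables deepspeed, and otherwise the named stage is looked up in a predefined mapping. A minimal hedged sketch of that rule; the mapping contents here are illustrative stand-ins, since the real `DS_CONFIG_MAPPER` is not shown.

```python
# Hedged sketch of the stage-resolution rule; DS_CONFIG_MAPPER here is a stand-in.
import json
import os
from typing import Any, Dict, Optional

DS_CONFIG_MAPPER: Dict[str, Dict[str, Any]] = {
    "2": {"zero_optimization": {"stage": 2}},  # illustrative only
    "3": {"zero_optimization": {"stage": 3}},  # illustrative only
}


def resolve_deepspeed(stage: Optional[str], config_path: Optional[str]) -> Optional[Dict[str, Any]]:
    if config_path is not None:
        if not os.path.isfile(config_path):
            raise ValueError(f"deepspeed_config_path set to {config_path}, but not found")
        with open(config_path) as file_object:
            return json.load(file_object)
    if stage in (None, 0, "0", "stage_0"):
        return None
    config = DS_CONFIG_MAPPER.get(str(stage))
    if config is None:
        raise ValueError(f'Deepspeed stage "{stage}" not found in keys: {list(DS_CONFIG_MAPPER)}')
    return config
```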
AMAAI-Lab/mustango
audioldm_eval/eval.py
[ { "identifier": "load_npy_data", "path": "audioldm_eval/datasets/load_mel.py", "snippet": "def load_npy_data(loader):\n new_train = []\n for mel, waveform, filename in tqdm(loader):\n batch = batch.float().numpy()\n new_train.append(\n batch.reshape(\n -1,\n )\n )\n new_train = np.array(new_train)\n return new_train" }, { "identifier": "MelPairedDataset", "path": "audioldm_eval/datasets/load_mel.py", "snippet": "class MelPairedDataset(torch.utils.data.Dataset):\n def __init__(\n self,\n datadir1,\n datadir2,\n _stft,\n sr=16000,\n fbin_mean=None,\n fbin_std=None,\n augment=False,\n limit_num=None,\n ):\n self.datalist1 = [os.path.join(datadir1, x) for x in os.listdir(datadir1)]\n self.datalist1 = sorted(self.datalist1)\n self.datalist1 = [item for item in self.datalist1 if item.endswith(\".wav\")]\n\n self.datalist2 = [os.path.join(datadir2, x) for x in os.listdir(datadir2)]\n self.datalist2 = sorted(self.datalist2)\n self.datalist2 = [item for item in self.datalist2 if item.endswith(\".wav\")]\n\n if limit_num is not None:\n self.datalist1 = self.datalist1[:limit_num]\n self.datalist2 = self.datalist2[:limit_num]\n\n self.align_two_file_list()\n\n self._stft = _stft\n self.sr = sr\n self.augment = augment\n\n # if fbin_mean is not None:\n # self.fbin_mean = fbin_mean[..., None]\n # self.fbin_std = fbin_std[..., None]\n # else:\n # self.fbin_mean = None\n # self.fbin_std = None\n\n def align_two_file_list(self):\n data_dict1 = {os.path.basename(x): x for x in self.datalist1}\n data_dict2 = {os.path.basename(x): x for x in self.datalist2}\n\n keyset1 = set(data_dict1.keys())\n keyset2 = set(data_dict2.keys())\n\n intersect_keys = keyset1.intersection(keyset2)\n\n self.datalist1 = [data_dict1[k] for k in intersect_keys]\n self.datalist2 = [data_dict2[k] for k in intersect_keys]\n\n # print(\"Two path have %s intersection files\" % len(intersect_keys))\n\n def __getitem__(self, index):\n while True:\n try:\n filename1 = self.datalist1[index]\n filename2 = self.datalist2[index]\n mel1, _, audio1 = self.get_mel_from_file(filename1)\n mel2, _, audio2 = self.get_mel_from_file(filename2)\n break\n except Exception as e:\n print(index, e)\n index = (index + 1) % len(self.datalist)\n\n # if(self.fbin_mean is not None):\n # mel = (mel - self.fbin_mean) / self.fbin_std\n min_len = min(mel1.shape[-1], mel2.shape[-1])\n return (\n mel1[..., :min_len],\n mel2[..., :min_len],\n os.path.basename(filename1),\n (audio1, audio2),\n )\n\n def __len__(self):\n return len(self.datalist1)\n\n def get_mel_from_file(self, audio_file):\n audio, file_sr = torchaudio.load(audio_file)\n # Only use the first channel\n audio = audio[0:1,...]\n audio = audio - audio.mean()\n\n if file_sr != self.sr:\n audio = torchaudio.functional.resample(\n audio, orig_freq=file_sr, new_freq=self.sr\n )\n\n if self._stft is not None:\n melspec, energy = self.get_mel_from_wav(audio[0, ...])\n else:\n melspec, energy = None, None\n\n return melspec, energy, audio\n\n def get_mel_from_wav(self, audio):\n audio = torch.clip(torch.FloatTensor(audio).unsqueeze(0), -1, 1)\n audio = torch.autograd.Variable(audio, requires_grad=False)\n\n # =========================================================================\n # Following the processing in https://github.com/v-iashin/SpecVQGAN/blob/5bc54f30eb89f82d129aa36ae3f1e90b60e73952/vocoder/mel2wav/extract_mel_spectrogram.py#L141\n melspec, energy = self._stft.mel_spectrogram(audio, normalize_fun=torch.log10)\n melspec = (melspec * 20) - 20\n melspec = (melspec + 100) / 100\n melspec = 
torch.clip(melspec, min=0, max=1.0)\n # =========================================================================\n # Augment\n # if(self.augment):\n # for i in range(1):\n # random_start = int(torch.rand(1) * 950)\n # melspec[0,:,random_start:random_start+50] = 0.0\n # =========================================================================\n melspec = torch.squeeze(melspec, 0).numpy().astype(np.float32)\n energy = torch.squeeze(energy, 0).numpy().astype(np.float32)\n return melspec, energy" }, { "identifier": "WaveDataset", "path": "audioldm_eval/datasets/load_mel.py", "snippet": "class WaveDataset(torch.utils.data.Dataset):\n def __init__(\n self,\n datadir,\n sr=16000,\n limit_num=None,\n ):\n self.datalist = [os.path.join(datadir, x) for x in os.listdir(datadir)]\n self.datalist = sorted(self.datalist)\n self.datalist = [item for item in self.datalist if item.endswith(\".wav\")]\n \n if limit_num is not None:\n self.datalist = self.datalist[:limit_num]\n self.sr = sr\n\n def __getitem__(self, index):\n while True:\n try:\n filename = self.datalist[index]\n waveform = self.read_from_file(filename)\n if waveform.size(-1) < 1:\n raise ValueError(\"empty file %s\" % filename)\n break\n except Exception as e:\n print(index, e)\n index = (index + 1) % len(self.datalist)\n \n return waveform, os.path.basename(filename)\n\n def __len__(self):\n return len(self.datalist)\n\n def read_from_file(self, audio_file):\n audio, file_sr = torchaudio.load(audio_file)\n # Only use the first channel\n audio = audio[0:1,...]\n audio = audio - audio.mean()\n\n if file_sr != self.sr and file_sr == 32000 and self.sr == 16000:\n audio = audio[..., ::2]\n if file_sr != self.sr and file_sr == 48000 and self.sr == 16000:\n audio = audio[..., ::3]\n elif file_sr != self.sr:\n audio = torchaudio.functional.resample(\n audio, orig_freq=file_sr, new_freq=self.sr\n )\n audio = pad_short_audio(audio, min_samples=32000)\n return audio" }, { "identifier": "FrechetAudioDistance", "path": "audioldm_eval/metrics/fad.py", "snippet": "class FrechetAudioDistance:\n def __init__(\n self, use_pca=False, use_activation=False, verbose=False, audio_load_worker=8\n ):\n self.__get_model(use_pca=use_pca, use_activation=use_activation)\n self.verbose = verbose\n self.audio_load_worker = audio_load_worker\n\n def __get_model(self, use_pca=False, use_activation=False):\n \"\"\"\n Params:\n -- x : Either\n (i) a string which is the directory of a set of audio files, or\n (ii) a np.ndarray of shape (num_samples, sample_length)\n \"\"\"\n self.model = torch.hub.load(\"harritaylor/torchvggish\", \"vggish\")\n if not use_pca:\n self.model.postprocess = False\n if not use_activation:\n self.model.embeddings = nn.Sequential(\n *list(self.model.embeddings.children())[:-1]\n )\n self.model.eval()\n\n def get_embeddings(self, x, sr=16000, limit_num=None):\n \"\"\"\n Get embeddings using VGGish model.\n Params:\n -- x : Either\n (i) a string which is the directory of a set of audio files, or\n (ii) a list of np.ndarray audio samples\n -- sr : Sampling rate, if x is a list of audio samples. 
Default value is 16000.\n \"\"\"\n embd_lst = []\n if isinstance(x, list):\n try:\n for audio, sr in tqdm(x, disable=(not self.verbose)):\n embd = self.model.forward(audio, sr)\n if self.model.device == torch.device(\"cuda\"):\n embd = embd.cpu()\n embd = embd.detach().numpy()\n embd_lst.append(embd)\n except Exception as e:\n print(\n \"[Frechet Audio Distance] get_embeddings throw an exception: {}\".format(\n str(e)\n )\n )\n elif isinstance(x, str):\n if self.verbose:\n print(\"Calculating the embedding of the audio files inside %s\" % x)\n try:\n for i, fname in tqdm(\n enumerate(os.listdir(x)), disable=(not self.verbose)\n ):\n if fname.endswith(\".wav\"):\n if limit_num is not None and i > limit_num:\n break\n try:\n audio, sr = load_audio_task(os.path.join(x, fname))\n embd = self.model.forward(audio, sr)\n if self.model.device == torch.device(\"cuda\"):\n embd = embd.cpu()\n embd = embd.detach().numpy()\n embd_lst.append(embd)\n except Exception as e:\n print(e, fname)\n continue\n except Exception as e:\n print(\n \"[Frechet Audio Distance] get_embeddings throw an exception: {}\".format(\n str(e)\n )\n )\n else:\n raise AttributeError\n\n return np.concatenate(embd_lst, axis=0)\n\n def calculate_embd_statistics(self, embd_lst):\n if isinstance(embd_lst, list):\n embd_lst = np.array(embd_lst)\n mu = np.mean(embd_lst, axis=0)\n sigma = np.cov(embd_lst, rowvar=False)\n return mu, sigma\n\n def calculate_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6):\n \"\"\"\n Adapted from: https://github.com/mseitzer/pytorch-fid/blob/master/src/pytorch_fid/fid_score.py\n\n Numpy implementation of the Frechet Distance.\n The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)\n and X_2 ~ N(mu_2, C_2) is\n d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).\n Stable version by Dougal J. 
Sutherland.\n Params:\n -- mu1 : Numpy array containing the activations of a layer of the\n inception net (like returned by the function 'get_predictions')\n for generated samples.\n -- mu2 : The sample mean over activations, precalculated on an\n representative data set.\n -- sigma1: The covariance matrix over activations for generated samples.\n -- sigma2: The covariance matrix over activations, precalculated on an\n representative data set.\n Returns:\n -- : The Frechet Distance.\n \"\"\"\n\n mu1 = np.atleast_1d(mu1)\n mu2 = np.atleast_1d(mu2)\n\n sigma1 = np.atleast_2d(sigma1)\n sigma2 = np.atleast_2d(sigma2)\n\n assert (\n mu1.shape == mu2.shape\n ), \"Training and test mean vectors have different lengths\"\n assert (\n sigma1.shape == sigma2.shape\n ), \"Training and test covariances have different dimensions\"\n\n diff = mu1 - mu2\n\n # Product might be almost singular\n covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)\n if not np.isfinite(covmean).all():\n msg = (\n \"fid calculation produces singular product; \"\n \"adding %s to diagonal of cov estimates\"\n ) % eps\n print(msg)\n offset = np.eye(sigma1.shape[0]) * eps\n covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n raise ValueError(\"Imaginary component {}\".format(m))\n covmean = covmean.real\n\n tr_covmean = np.trace(covmean)\n\n return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean\n\n def __load_audio_files(self, dir):\n task_results = []\n\n pool = ThreadPool(self.audio_load_worker)\n pbar = tqdm(total=len(os.listdir(dir)), disable=(not self.verbose))\n\n def update(*a):\n pbar.update()\n\n if self.verbose:\n print(\"[Frechet Audio Distance] Loading audio from {}...\".format(dir))\n for fname in os.listdir(dir):\n res = pool.apply_async(\n load_audio_task, args=(os.path.join(dir, fname),), callback=update\n )\n\n task_results.append(res)\n pool.close()\n pool.join()\n\n return [k.get() for k in task_results]\n\n def score(self, background_dir, eval_dir, store_embds=False, limit_num=None):\n # background_dir: generated samples\n # eval_dir: groundtruth samples\n try:\n # audio_background = self.__load_audio_files(background_dir)\n # audio_eval = self.__load_audio_files(eval_dir)\n embds_background = self.get_embeddings(background_dir, limit_num=limit_num)\n embds_eval = self.get_embeddings(eval_dir, limit_num=limit_num)\n\n if store_embds:\n np.save(\"embds_background.npy\", embds_background)\n np.save(\"embds_eval.npy\", embds_eval)\n\n if len(embds_background) == 0:\n print(\n \"[Frechet Audio Distance] background set dir is empty, exitting...\"\n )\n return -1\n\n if len(embds_eval) == 0:\n print(\"[Frechet Audio Distance] eval set dir is empty, exitting...\")\n return -1\n\n mu_background, sigma_background = self.calculate_embd_statistics(\n embds_background\n )\n mu_eval, sigma_eval = self.calculate_embd_statistics(embds_eval)\n\n fad_score = self.calculate_frechet_distance(\n mu_background, sigma_background, mu_eval, sigma_eval\n )\n\n return {\"frechet_audio_distance\": fad_score}\n\n except Exception as e:\n print(\"[Frechet Audio Distance] exception thrown, {}\".format(str(e)))\n return -1" }, { "identifier": "calculate_fid", "path": "audioldm_eval/metrics/fid.py", "snippet": "def calculate_fid(\n featuresdict_1, featuresdict_2, feat_layer_name\n): # using 2048 layer to calculate\n 
eps = 1e-6\n features_1 = featuresdict_1[feat_layer_name]\n features_2 = featuresdict_2[feat_layer_name]\n\n assert torch.is_tensor(features_1) and features_1.dim() == 2\n assert torch.is_tensor(features_2) and features_2.dim() == 2\n\n stat_1 = {\n \"mu\": np.mean(features_1.numpy(), axis=0),\n \"sigma\": np.cov(features_1.numpy(), rowvar=False),\n }\n stat_2 = {\n \"mu\": np.mean(features_2.numpy(), axis=0),\n \"sigma\": np.cov(features_2.numpy(), rowvar=False),\n }\n\n # print(\"Computing Frechet Distance (PANNs)\")\n\n mu1, sigma1 = stat_1[\"mu\"], stat_1[\"sigma\"]\n mu2, sigma2 = stat_2[\"mu\"], stat_2[\"sigma\"]\n assert mu1.shape == mu2.shape and mu1.dtype == mu2.dtype\n assert sigma1.shape == sigma2.shape and sigma1.dtype == sigma2.dtype\n\n mu1 = np.atleast_1d(mu1)\n mu2 = np.atleast_1d(mu2)\n\n sigma1 = np.atleast_2d(sigma1)\n sigma2 = np.atleast_2d(sigma2)\n\n assert (\n mu1.shape == mu2.shape\n ), \"Training and test mean vectors have different lengths\"\n assert (\n sigma1.shape == sigma2.shape\n ), \"Training and test covariances have different dimensions\"\n\n diff = mu1 - mu2\n\n # Product might be almost singular\n covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)\n if not np.isfinite(covmean).all():\n print(\n f\"WARNING: fid calculation produces singular product; adding {eps} to diagonal of cov\"\n )\n offset = np.eye(sigma1.shape[0]) * eps\n covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n assert False, \"Imaginary component {}\".format(m)\n covmean = covmean.real\n\n tr_covmean = np.trace(covmean)\n\n fid = diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean\n\n return {\n \"frechet_distance\": float(fid),\n }" }, { "identifier": "calculate_isc", "path": "audioldm_eval/metrics/isc.py", "snippet": "def calculate_isc(featuresdict, feat_layer_name, rng_seed, samples_shuffle, splits):\n # print(\"Computing Inception Score\")\n \n features = featuresdict[feat_layer_name]\n\n assert torch.is_tensor(features) and features.dim() == 2\n N, C = features.shape\n if samples_shuffle:\n rng = np.random.RandomState(rng_seed)\n features = features[rng.permutation(N), :]\n features = features.double()\n\n p = features.softmax(dim=1)\n log_p = features.log_softmax(dim=1)\n\n scores = []\n for i in range(splits):\n p_chunk = p[(i * N // splits) : ((i + 1) * N // splits), :] # 一部分的预测概率\n log_p_chunk = log_p[(i * N // splits) : ((i + 1) * N // splits), :] # log\n q_chunk = p_chunk.mean(dim=0, keepdim=True) # 概率的均值\n kl = p_chunk * (log_p_chunk - q_chunk.log()) #\n kl = kl.sum(dim=1).mean().exp().item()\n scores.append(kl)\n # print(\"scores\",scores)\n return {\n \"inception_score_mean\": float(np.mean(scores)),\n \"inception_score_std\": float(np.std(scores)),\n }" }, { "identifier": "calculate_kid", "path": "audioldm_eval/metrics/kid.py", "snippet": "def calculate_kid(\n featuresdict_1,\n featuresdict_2,\n subsets,\n subset_size,\n degree,\n gamma,\n coef0,\n rng_seed,\n feat_layer_name,\n):\n features_1 = featuresdict_1[feat_layer_name]\n features_2 = featuresdict_2[feat_layer_name]\n\n assert torch.is_tensor(features_1) and features_1.dim() == 2\n assert torch.is_tensor(features_2) and features_2.dim() == 2\n assert features_1.shape[1] == features_2.shape[1]\n if subset_size > len(features_2):\n print(\n f\"WARNING: subset size ({subset_size}) is 
larger than feature length ({len(features_2)}). \",\n \"Using\",\n len(features_2),\n \"for both datasets\",\n )\n subset_size = len(features_2)\n if subset_size > len(features_1):\n print(\n f\"WARNING: subset size ({subset_size}) is larger than feature length ({len(features_1)}). \",\n \"Using\",\n len(features_1),\n \"for both datasets\",\n )\n subset_size = len(features_1)\n\n features_1 = features_1.cpu().numpy()\n features_2 = features_2.cpu().numpy()\n\n mmds = np.zeros(subsets)\n rng = np.random.RandomState(rng_seed)\n\n for i in tqdm(\n range(subsets),\n leave=False,\n unit=\"subsets\",\n desc=\"Computing Kernel Inception Distance\",\n ):\n f1 = features_1[rng.choice(len(features_1), subset_size, replace=False)]\n f2 = features_2[rng.choice(len(features_2), subset_size, replace=False)]\n o = polynomial_mmd(f1, f2, degree, gamma, coef0)\n mmds[i] = o\n\n return {\n \"kernel_inception_distance_mean\": float(np.mean(mmds)),\n \"kernel_inception_distance_std\": float(np.std(mmds)),\n }" }, { "identifier": "calculate_kl", "path": "audioldm_eval/metrics/kl.py", "snippet": "def calculate_kl(featuresdict_1, featuresdict_2, feat_layer_name, same_name=True):\n # test_input(featuresdict_1, featuresdict_2, feat_layer_name, dataset_name, classes)\n if not same_name:\n return (\n {\n \"kullback_leibler_divergence_sigmoid\": float(-1),\n \"kullback_leibler_divergence_softmax\": float(-1),\n },\n None,\n None,\n )\n\n # print('KL: Assuming that `input2` is \"pseudo\" target and `input1` is prediction. KL(input2_i||input1_i)')\n EPS = 1e-6\n features_1 = featuresdict_1[feat_layer_name]\n features_2 = featuresdict_2[feat_layer_name]\n # # print('features_1 ',features_1.shape) # the predicted (num*10, class_num)\n # # print('features_2 ',features_2.shape) # the true\n paths_1 = [os.path.basename(x) for x in featuresdict_1[\"file_path_\"]]\n paths_2 = [os.path.basename(x) for x in featuresdict_2[\"file_path_\"]]\n # # print('paths_1 ',len(paths_1)) its path\n # # print('paths_2 ',len(paths_2))\n path_to_feats_1 = {p: f for p, f in zip(paths_1, features_1)}\n # #print(path_to_feats_1)\n path_to_feats_2 = {p: f for p, f in zip(paths_2, features_2)}\n # # dataset_name: caps\n # # in input1 (fakes) can have multiple samples per video, while input2 has only one real\n # sharedkey_to_feats_1 = {path_to_sharedkey(p, dataset_name, classes): [] for p in paths_1}\n sharedkey_to_feats_1 = {p: path_to_feats_1[p] for p in paths_1}\n sharedkey_to_feats_2 = {p: path_to_feats_2[p] for p in paths_2}\n # sharedkey_to_feats_2 = {path_to_sharedkey(p, dataset_name, classes):path_to_feats_2[p] for p in paths_1}\n\n features_1 = []\n features_2 = []\n\n for sharedkey, feat_2 in sharedkey_to_feats_2.items():\n # print(\"feat_2\",feat_2)\n if sharedkey not in sharedkey_to_feats_1.keys():\n print(\"%s is not in the generation result\" % sharedkey)\n continue\n features_1.extend([sharedkey_to_feats_1[sharedkey]])\n # print(\"feature_step\",len(features_1))\n # print(\"share\",sharedkey_to_feats_1[sharedkey])\n # just replicating the ground truth logits to compare with multiple samples in prediction\n # samples_num = len(sharedkey_to_feats_1[sharedkey])\n features_2.extend([feat_2])\n\n features_1 = torch.stack(features_1, dim=0)\n features_2 = torch.stack(features_2, dim=0)\n\n kl_ref = torch.nn.functional.kl_div(\n (features_1.softmax(dim=1) + EPS).log(),\n features_2.softmax(dim=1),\n reduction=\"none\",\n ) / len(features_1)\n kl_ref = torch.mean(kl_ref, dim=-1)\n\n # AudioGen use this formulation\n kl_softmax = 
torch.nn.functional.kl_div(\n (features_1.softmax(dim=1) + EPS).log(),\n features_2.softmax(dim=1),\n reduction=\"sum\",\n ) / len(features_1)\n\n # For multi-class audio clips, this formulation could be better\n kl_sigmoid = torch.nn.functional.kl_div(\n (features_1.sigmoid() + EPS).log(), features_2.sigmoid(), reduction=\"sum\"\n ) / len(features_1)\n\n return (\n {\n \"kullback_leibler_divergence_sigmoid\": float(kl_sigmoid),\n \"kullback_leibler_divergence_softmax\": float(kl_softmax),\n },\n kl_ref,\n paths_1,\n )" }, { "identifier": "Cnn14", "path": "audioldm_eval/feature_extractors/panns/models.py", "snippet": "class Cnn14(nn.Module):\n def __init__(\n self,\n features_list,\n sample_rate,\n window_size,\n hop_size,\n mel_bins,\n fmin,\n fmax,\n classes_num,\n ):\n\n super(Cnn14, self).__init__()\n\n window = \"hann\"\n center = True\n pad_mode = \"reflect\"\n ref = 1.0\n amin = 1e-10\n top_db = None\n\n self.features_list = features_list\n\n # Spectrogram extractor\n self.spectrogram_extractor = Spectrogram(\n n_fft=window_size,\n hop_length=hop_size,\n win_length=window_size,\n window=window,\n center=center,\n pad_mode=pad_mode,\n freeze_parameters=True,\n )\n\n # Logmel feature extractor\n self.logmel_extractor = LogmelFilterBank(\n sr=sample_rate,\n n_fft=window_size,\n n_mels=mel_bins,\n fmin=fmin,\n fmax=fmax,\n ref=ref,\n amin=amin,\n top_db=top_db,\n freeze_parameters=True,\n )\n\n # Spec augmenter\n self.spec_augmenter = SpecAugmentation(\n time_drop_width=64,\n time_stripes_num=2,\n freq_drop_width=8,\n freq_stripes_num=2,\n )\n\n self.bn0 = nn.BatchNorm2d(64)\n\n self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)\n self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)\n self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)\n self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)\n self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)\n self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)\n\n self.fc1 = nn.Linear(2048, 2048, bias=True)\n self.fc_audioset = nn.Linear(2048, classes_num, bias=True)\n\n if not os.path.exists(\"ckpt/Cnn14_mAP=0.431.pth\"):\n print(\"Download pretrained checkpoints of Cnn14.\")\n os.makedirs(\"ckpt\", exist_ok=True)\n os.system(\n \"wget -P ckpt/ %s\"\n % (\"https://zenodo.org/record/3576403/files/Cnn14_mAP%3D0.431.pth\")\n )\n os.system(\n \"wget -P ckpt/ %s\"\n % (\"https://zenodo.org/record/3987831/files/Cnn14_16k_mAP%3D0.438.pth\")\n )\n\n # self.init_weight()\n if sample_rate == 16000:\n state_dict = torch.load(\"ckpt/Cnn14_16k_mAP=0.438.pth\")\n self.load_state_dict(state_dict[\"model\"])\n elif sample_rate == 32000:\n state_dict = torch.load(\"ckpt/Cnn14_mAP=0.431.pth\")\n self.load_state_dict(state_dict[\"model\"])\n\n def init_weight(self):\n init_bn(self.bn0)\n init_layer(self.fc1)\n init_layer(self.fc_audioset)\n\n # def convert_features_tuple_to_dict(self, features):\n # \"\"\"\n # The only compound return type of the forward function amenable to JIT tracing is tuple.\n # This function simply helps to recover the mapping.\n # \"\"\"\n # message = 'Features must be the output of forward function'\n # assert type(features) is tuple and len(features) == len(self.features_list), message\n # return dict(((name, feature) for name, feature in zip(self.features_list, features)))\n\n def forward(self, input, mixup_lambda=None):\n \"\"\"\n Input: (batch_size, data_length)\"\"\"\n\n x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)\n x = 
self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)\n\n x = x.transpose(1, 3)\n x = self.bn0(x)\n x = x.transpose(1, 3)\n embeddings = []\n\n if self.training:\n x = self.spec_augmenter(x)\n\n # Mixup on spectrogram\n if self.training and mixup_lambda is not None:\n x = do_mixup(x, mixup_lambda)\n\n x = self.conv_block1(x, pool_size=(2, 2), pool_type=\"avg\")\n embeddings.append(x)\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block2(x, pool_size=(2, 2), pool_type=\"avg\")\n embeddings.append(x)\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block3(x, pool_size=(2, 2), pool_type=\"avg\")\n embeddings.append(x)\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block4(x, pool_size=(2, 2), pool_type=\"avg\")\n embeddings.append(x)\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block5(x, pool_size=(2, 2), pool_type=\"avg\")\n embeddings.append(x)\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.conv_block6(x, pool_size=(1, 1), pool_type=\"avg\")\n embeddings.append(x)\n x = F.dropout(x, p=0.2, training=self.training)\n x = torch.mean(x, dim=3)\n\n (x1, _) = torch.max(x, dim=2)\n x2 = torch.mean(x, dim=2)\n x = x1 + x2\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.fc1(x)\n x = F.dropout(x, p=0.5, training=self.training) # .clone()\n embedding = F.relu_(x).clone()\n logits = self.fc_audioset(F.relu_(x)).clone()\n clipwise_output = torch.sigmoid(logits)\n output_dict = {\n \"logits\": logits,\n \"2048\": embedding,\n \"clipwise_output\": clipwise_output,\n }\n\n return output_dict" }, { "identifier": "save_pickle", "path": "audioldm_eval/audio/tools.py", "snippet": "def save_pickle(obj, fname):\n # print(\"Save pickle at \" + fname)\n with open(fname, \"wb\") as f:\n pickle.dump(obj, f)" }, { "identifier": "load_pickle", "path": "audioldm_eval/audio/tools.py", "snippet": "def load_pickle(fname):\n # print(\"Load pickle at \" + fname)\n with open(fname, \"rb\") as f:\n res = pickle.load(f)\n return res" }, { "identifier": "write_json", "path": "audioldm_eval/audio/tools.py", "snippet": "def write_json(my_dict, fname):\n # print(\"Save json file at \" + fname)\n json_str = json.dumps(my_dict)\n with open(fname, \"w\") as json_file:\n json_file.write(json_str)" }, { "identifier": "load_json", "path": "audioldm_eval/audio/tools.py", "snippet": "def load_json(fname):\n with open(fname, \"r\") as f:\n data = json.load(f)\n return data" } ]
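The `FrechetAudioDistance` snippet above reduces each directory of audio to VGGish embeddings and then compares the Gaussians fitted to the two embedding sets with d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). The following is a hedged, self-contained sketch of just that statistical core on synthetic embeddings; the VGGish model and audio loading are deliberately omitted.

```python
# Hedged sketch of the Frechet-distance core on synthetic "embeddings".
import numpy as np
from scipy import linalg


def frechet_distance(x: np.ndarray, y: np.ndarray, eps: float = 1e-6) -> float:
    """x, y: (num_samples, embedding_dim) arrays of embeddings."""
    mu1, mu2 = x.mean(axis=0), y.mean(axis=0)
    sigma1, sigma2 = np.cov(x, rowvar=False), np.cov(y, rowvar=False)

    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # Nearly singular product: nudge the diagonals, as the snippet above does.
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    if np.iscomplexobj(covmean):
        covmean = covmean.real

    return float(diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean))


rng = np.random.default_rng(0)
real = rng.normal(0.0, 1.0, size=(512, 16))
fake = rng.normal(0.3, 1.1, size=(512, 16))
print(frechet_distance(real, fake))  # small positive value; 0 for identical Gaussians
```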
import os
import numpy as np
import argparse
import torch
import audioldm_eval.audio as Audio
import time
import ipdb
from audioldm_eval.datasets.load_mel import load_npy_data, MelPairedDataset, WaveDataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from audioldm_eval.metrics.fad import FrechetAudioDistance
from audioldm_eval import calculate_fid, calculate_isc, calculate_kid, calculate_kl
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim
from audioldm_eval.feature_extractors.panns import Cnn14
from audioldm_eval.audio.tools import save_pickle, load_pickle, write_json, load_json
from ssr_eval.metrics import AudioMetrics
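Together with the `EvaluationHelper` class that follows, these imports support a short evaluation driver. A hedged usage sketch based on the `main(generate_files_path, groundtruth_path, limit_num)` signature visible below; the directory paths are placeholders, and both folders are expected to contain `.wav` files with matching names.

```python
# Hedged usage sketch; paths are placeholders and EvaluationHelper is the class defined below.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

evaluator = EvaluationHelper(sampling_rate=16000, device=device)  # only 16 kHz or 32 kHz are supported
metrics = evaluator.main(
    "path/to/generated_wavs",    # placeholder
    "path/to/groundtruth_wavs",  # placeholder
    limit_num=None,
)
print(metrics)  # e.g. frechet_audio_distance, kl_softmax, psnr, ssim, ...
```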
10,976
            num_workers=num_workers,
        )

        resultloader = DataLoader(
            WaveDataset(
                groundtruth_path,
                self.sampling_rate,
                limit_num=limit_num,
            ),
            batch_size=1,
            sampler=None,
            num_workers=num_workers,
        )

        pairedloader = DataLoader(
            MelPairedDataset(
                generate_files_path,
                groundtruth_path,
                self._stft,
                self.sampling_rate,
                self.fbin_mean,
                self.fbin_std,
                limit_num=limit_num,
            ),
            batch_size=1,
            sampler=None,
            num_workers=16,
        )

        out = {}

        metric_lsd = self.calculate_lsd(pairedloader, same_name=same_name)
        out.update(metric_lsd)

        featuresdict_2 = self.get_featuresdict(resultloader)
        featuresdict_1 = self.get_featuresdict(outputloader)

        # if cfg.have_kl:
        metric_psnr_ssim = self.calculate_psnr_ssim(pairedloader, same_name=same_name)
        out.update(metric_psnr_ssim)

        metric_kl, kl_ref, paths_1 = calculate_kl(
            featuresdict_1, featuresdict_2, "logits", same_name
        )
        out.update(metric_kl)

        metric_isc = calculate_isc(
            featuresdict_1,
            feat_layer_name="logits",
            splits=10,
            samples_shuffle=True,
            rng_seed=2020,
        )
        out.update(metric_isc)

        metric_fid = calculate_fid(
            featuresdict_1, featuresdict_2, feat_layer_name="2048"
        )
        out.update(metric_fid)

        # Gen, target
        fad_score = self.frechet.score(generate_files_path, groundtruth_path, limit_num=limit_num)
        out.update(fad_score)

        metric_kid = calculate_kid(
            featuresdict_1,
            featuresdict_2,
            feat_layer_name="2048",
            subsets=100,
            subset_size=1000,
            degree=3,
            gamma=None,
            coef0=1,
            rng_seed=2020,
        )
        out.update(metric_kid)

        '''
        print("\n".join((f"{k}: {v:.7f}" for k, v in out.items())))
        print("\n")
        print(limit_num)
        print(
            f'KL_Sigmoid: {out.get("kullback_leibler_divergence_sigmoid", float("nan")):8.5f};',
            f'KL: {out.get("kullback_leibler_divergence_softmax", float("nan")):8.5f};',
            f'PSNR: {out.get("psnr", float("nan")):.5f}',
            f'SSIM: {out.get("ssim", float("nan")):.5f}',
            f'ISc: {out.get("inception_score_mean", float("nan")):8.5f} ({out.get("inception_score_std", float("nan")):5f});',
            f'KID: {out.get("kernel_inception_distance_mean", float("nan")):.5f}',
            f'({out.get("kernel_inception_distance_std", float("nan")):.5f})',
            f'FD: {out.get("frechet_distance", float("nan")):8.5f};',
            f'FAD: {out.get("frechet_audio_distance", float("nan")):.5f}',
            f'LSD: {out.get("lsd", float("nan")):.5f}',
            f'SSIM_STFT: {out.get("ssim_stft", float("nan")):.5f}',
        )
        '''

        result = {
            "frechet_distance": out.get("frechet_distance", float("nan")),
            "frechet_audio_distance": out.get("frechet_audio_distance", float("nan")),
            "kl_sigmoid": out.get(
                "kullback_leibler_divergence_sigmoid", float("nan")
            ),
            "kl_softmax": out.get(
                "kullback_leibler_divergence_softmax", float("nan")
            ),
            "lsd": out.get("lsd", float("nan")),
            "psnr": out.get("psnr", float("nan")),
            "ssim": out.get("ssim", float("nan")),
            "ssim_stft": out.get("ssim_stft", float("nan")),
            "is_mean": out.get("inception_score_mean", float("nan")),
            "is_std": out.get("inception_score_std", float("nan")),
            "kid_mean": out.get(
                "kernel_inception_distance_mean", float("nan")
            ),
            "kid_std": out.get(
                "kernel_inception_distance_std", float("nan")
            ),
        }

        result = {k: round(v, 4) for k, v in result.items()}

        json_path = generate_files_path + "_evaluation_results.json"
class EvaluationHelper:
    def __init__(self, sampling_rate, device, backbone="cnn14") -> None:
        self.device = device
        self.backbone = backbone
        self.sampling_rate = sampling_rate
        self.frechet = FrechetAudioDistance(
            use_pca=False,
            use_activation=False,
            verbose=False,
        )
        self.lsd_metric = AudioMetrics(self.sampling_rate)
        self.frechet.model = self.frechet.model.to(device)

        features_list = ["2048", "logits"]
        if self.sampling_rate == 16000:
            self.mel_model = Cnn14(
                features_list=features_list,
                sample_rate=16000,
                window_size=512,
                hop_size=160,
                mel_bins=64,
                fmin=50,
                fmax=8000,
                classes_num=527,
            )
        elif self.sampling_rate == 32000:
            self.mel_model = Cnn14(
                features_list=features_list,
                sample_rate=32000,
                window_size=1024,
                hop_size=320,
                mel_bins=64,
                fmin=50,
                fmax=14000,
                classes_num=527,
            )
        else:
            raise ValueError(
                "We only support the evaluation on 16kHz and 32kHz sampling rate."
            )

        if self.sampling_rate == 16000:
            self._stft = Audio.TacotronSTFT(512, 160, 512, 64, 16000, 50, 8000)
        elif self.sampling_rate == 32000:
            self._stft = Audio.TacotronSTFT(1024, 320, 1024, 64, 32000, 50, 14000)
        else:
            raise ValueError(
                "We only support the evaluation on 16kHz and 32kHz sampling rate."
            )

        self.mel_model.eval()
        self.mel_model.to(self.device)
        self.fbin_mean, self.fbin_std = None, None

    def main(
        self,
        generate_files_path,
        groundtruth_path,
        limit_num=None,
    ):
        self.file_init_check(generate_files_path)
        self.file_init_check(groundtruth_path)

        same_name = self.get_filename_intersection_ratio(
            generate_files_path, groundtruth_path, limit_num=limit_num
        )
        metrics = self.calculate_metrics(generate_files_path, groundtruth_path, same_name, limit_num)

        return metrics

    def file_init_check(self, dir):
        assert os.path.exists(dir), "The path does not exist %s" % dir
        assert len(os.listdir(dir)) > 1, "There is no files in %s" % dir

    def get_filename_intersection_ratio(
        self, dir1, dir2, threshold=0.99, limit_num=None
    ):
        self.datalist1 = [os.path.join(dir1, x) for x in os.listdir(dir1)]
        self.datalist1 = sorted(self.datalist1)
        self.datalist1 = [item for item in self.datalist1 if item.endswith(".wav")]

        self.datalist2 = [os.path.join(dir2, x) for x in os.listdir(dir2)]
        self.datalist2 = sorted(self.datalist2)
        self.datalist2 = [item for item in self.datalist2 if item.endswith(".wav")]

        data_dict1 = {os.path.basename(x): x for x in self.datalist1}
        data_dict2 = {os.path.basename(x): x for x in self.datalist2}

        keyset1 = set(data_dict1.keys())
        keyset2 = set(data_dict2.keys())

        intersect_keys = keyset1.intersection(keyset2)
        if (
            len(intersect_keys) / len(keyset1) > threshold
            and len(intersect_keys) / len(keyset2) > threshold
        ):
            '''
            print(
                "+Two path have %s intersection files out of total %s & %s files. Processing two folder with same_name=True"
                % (len(intersect_keys), len(keyset1), len(keyset2))
            )
            '''
            return True
        else:
            '''
            print(
                "-Two path have %s intersection files out of total %s & %s files. Processing two folder with same_name=False"
                % (len(intersect_keys), len(keyset1), len(keyset2))
            )
            '''
            return False

    def calculate_lsd(self, pairedloader, same_name=True, time_offset=160 * 7):
        if same_name == False:
            return {
                "lsd": -1,
                "ssim_stft": -1,
            }
        # print("Calculating LSD using a time offset of %s ..." % time_offset)
        lsd_avg = []
        ssim_stft_avg = []
        for _, _, filename, (audio1, audio2) in tqdm(pairedloader, leave=False):
            audio1 = audio1.cpu().numpy()[0, 0]
            audio2 = audio2.cpu().numpy()[0, 0]

            # If you use HIFIGAN (verified on 2023-01-12), you need seven frames' offset
            audio1 = audio1[time_offset:]

            audio1 = audio1 - np.mean(audio1)
            audio2 = audio2 - np.mean(audio2)

            audio1 = audio1 / np.max(np.abs(audio1))
            audio2 = audio2 / np.max(np.abs(audio2))

            min_len = min(audio1.shape[0], audio2.shape[0])
            audio1, audio2 = audio1[:min_len], audio2[:min_len]

            try:
                result = self.lsd(audio1, audio2)
                lsd_avg.append(result["lsd"])
                ssim_stft_avg.append(result["ssim"])
            except:
                continue

        return {"lsd": np.mean(lsd_avg), "ssim_stft": np.mean(ssim_stft_avg)}

    def lsd(self, audio1, audio2):
        result = self.lsd_metric.evaluation(audio1, audio2, None)
        return result

    def calculate_psnr_ssim(self, pairedloader, same_name=True):
        if same_name == False:
            return {"psnr": -1, "ssim": -1}
        psnr_avg = []
        ssim_avg = []
        for mel_gen, mel_target, filename, _ in tqdm(pairedloader, leave=False):
            mel_gen = mel_gen.cpu().numpy()[0]
            mel_target = mel_target.cpu().numpy()[0]
            psnrval = psnr(mel_gen, mel_target)
            if np.isinf(psnrval):
                print("Infinite value encountered in psnr %s " % filename)
                continue
            psnr_avg.append(psnrval)
            ssim_avg.append(ssim(mel_gen, mel_target))
        return {"psnr": np.mean(psnr_avg), "ssim": np.mean(ssim_avg)}

    def calculate_metrics(self, generate_files_path, groundtruth_path, same_name, limit_num=None):
        # Generation, target
        torch.manual_seed(0)

        num_workers = 0

        outputloader = DataLoader(
            WaveDataset(
                generate_files_path,
                self.sampling_rate,
                limit_num=limit_num,
            ),
            batch_size=1,
            sampler=None,
            num_workers=num_workers,
        )

        resultloader = DataLoader(
            WaveDataset(
                groundtruth_path,
                self.sampling_rate,
                limit_num=limit_num,
            ),
            batch_size=1,
            sampler=None,
            num_workers=num_workers,
        )

        pairedloader = DataLoader(
            MelPairedDataset(
                generate_files_path,
                groundtruth_path,
                self._stft,
                self.sampling_rate,
                self.fbin_mean,
                self.fbin_std,
                limit_num=limit_num,
            ),
            batch_size=1,
            sampler=None,
            num_workers=16,
        )

        out = {}

        metric_lsd = self.calculate_lsd(pairedloader, same_name=same_name)
        out.update(metric_lsd)

        featuresdict_2 = self.get_featuresdict(resultloader)
        featuresdict_1 = self.get_featuresdict(outputloader)

        # if cfg.have_kl:
        metric_psnr_ssim = self.calculate_psnr_ssim(pairedloader, same_name=same_name)
        out.update(metric_psnr_ssim)

        metric_kl, kl_ref, paths_1 = calculate_kl(
            featuresdict_1, featuresdict_2, "logits", same_name
        )
        out.update(metric_kl)

        metric_isc = calculate_isc(
            featuresdict_1,
            feat_layer_name="logits",
            splits=10,
            samples_shuffle=True,
            rng_seed=2020,
        )
        out.update(metric_isc)

        metric_fid = calculate_fid(
            featuresdict_1, featuresdict_2, feat_layer_name="2048"
        )
        out.update(metric_fid)

        # Gen, target
        fad_score = self.frechet.score(generate_files_path, groundtruth_path, limit_num=limit_num)
        out.update(fad_score)

        metric_kid = calculate_kid(
            featuresdict_1,
            featuresdict_2,
            feat_layer_name="2048",
            subsets=100,
            subset_size=1000,
            degree=3,
            gamma=None,
            coef0=1,
            rng_seed=2020,
        )
        out.update(metric_kid)

        '''
        print("\n".join((f"{k}: {v:.7f}" for k, v in out.items())))
        print("\n")
        print(limit_num)
        print(
            f'KL_Sigmoid: {out.get("kullback_leibler_divergence_sigmoid", float("nan")):8.5f};',
            f'KL: {out.get("kullback_leibler_divergence_softmax", float("nan")):8.5f};',
            f'PSNR: {out.get("psnr", float("nan")):.5f}',
            f'SSIM: {out.get("ssim", float("nan")):.5f}',
            f'ISc: {out.get("inception_score_mean", float("nan")):8.5f} ({out.get("inception_score_std", float("nan")):5f});',
            f'KID: {out.get("kernel_inception_distance_mean", float("nan")):.5f}',
            f'({out.get("kernel_inception_distance_std", float("nan")):.5f})',
            f'FD: {out.get("frechet_distance", float("nan")):8.5f};',
            f'FAD: {out.get("frechet_audio_distance", float("nan")):.5f}',
            f'LSD: {out.get("lsd", float("nan")):.5f}',
            f'SSIM_STFT: {out.get("ssim_stft", float("nan")):.5f}',
        )
        '''

        result = {
            "frechet_distance": out.get("frechet_distance", float("nan")),
            "frechet_audio_distance": out.get("frechet_audio_distance", float("nan")),
            "kl_sigmoid": out.get(
                "kullback_leibler_divergence_sigmoid", float("nan")
            ),
            "kl_softmax": out.get(
                "kullback_leibler_divergence_softmax", float("nan")
            ),
            "lsd": out.get("lsd", float("nan")),
            "psnr": out.get("psnr", float("nan")),
            "ssim": out.get("ssim", float("nan")),
            "ssim_stft": out.get("ssim_stft", float("nan")),
            "is_mean": out.get("inception_score_mean", float("nan")),
            "is_std": out.get("inception_score_std", float("nan")),
            "kid_mean": out.get(
                "kernel_inception_distance_mean", float("nan")
            ),
            "kid_std": out.get(
                "kernel_inception_distance_std", float("nan")
            ),
        }

        result = {k: round(v, 4) for k, v in result.items()}

        json_path = generate_files_path + "_evaluation_results.json"
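`calculate_metrics` above calls `calculate_kid`, which averages a polynomial-kernel MMD over random subsets of the two embedding sets; the `polynomial_mmd` helper itself is not shown in this record. Below is a hedged sketch of the standard unbiased polynomial-kernel MMD^2 estimator with the same `degree`, `gamma`, and `coef0` parameters, offered as an illustration rather than the repository's exact implementation.

```python
# Hedged sketch of an unbiased polynomial-kernel MMD^2, the quantity KID averages over subsets.
import numpy as np


def polynomial_kernel(x: np.ndarray, y: np.ndarray, degree=3, gamma=None, coef0=1) -> np.ndarray:
    if gamma is None:
        gamma = 1.0 / x.shape[1]
    return (gamma * x @ y.T + coef0) ** degree


def polynomial_mmd(f1: np.ndarray, f2: np.ndarray, degree=3, gamma=None, coef0=1) -> float:
    k_xx = polynomial_kernel(f1, f1, degree, gamma, coef0)
    k_yy = polynomial_kernel(f2, f2, degree, gamma, coef0)
    k_xy = polynomial_kernel(f1, f2, degree, gamma, coef0)
    m, n = len(f1), len(f2)
    # Unbiased estimate: drop the diagonals of the within-set kernel matrices.
    term_xx = (k_xx.sum() - np.trace(k_xx)) / (m * (m - 1))
    term_yy = (k_yy.sum() - np.trace(k_yy)) / (n * (n - 1))
    term_xy = 2 * k_xy.mean()
    return float(term_xx + term_yy - term_xy)


rng = np.random.default_rng(0)
print(polynomial_mmd(rng.normal(size=(100, 64)), rng.normal(size=(100, 64))))  # near 0 for matched sets
```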
write_json(result, json_path)
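This next line persists the rounded metrics as `<generate_files_path>_evaluation_results.json`. A small hedged sketch of reading that file back afterwards with the `load_json` helper from the same module; the path is a placeholder.

```python
# Hedged sketch: read the persisted metrics back for reporting; the path is a placeholder.
from audioldm_eval.audio.tools import load_json

results = load_json("path/to/generated_wavs_evaluation_results.json")
print(f"FAD: {results['frechet_audio_distance']}, KL: {results['kl_softmax']}")
```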
11
2023-11-14 23:29:31+00:00
16k
BraveGroup/Drive-WM
src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
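This record's context, quoted below, opens with `ConfigMixin`, whose docstring demonstrates config round-trips between scheduler classes. A short hedged sketch of that round-trip using only the calls shown in the docstring plus `save_config`; the local directory is a placeholder.

```python
# Hedged sketch of the ConfigMixin round-trip described in the snippet below.
from diffusers import DDIMScheduler, DDPMScheduler

scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32")
scheduler.save_config("./ddpm-config")              # placeholder directory; writes the config JSON
ddim = DDIMScheduler.from_config(scheduler.config)  # same config, different scheduler class
print(type(ddim).__name__, ddim.config.num_train_timesteps)
```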
[ { "identifier": "ConfigMixin", "path": "src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "register_to_config", "path": "src/diffusers/configuration_utils.py", "snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: 
remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)" }, { "identifier": "deprecate", "path": "src/diffusers/utils/deprecation_utils.py", "snippet": "def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):\n from .. import __version__\n\n deprecated_kwargs = take_from\n values = ()\n if not isinstance(args[0], tuple):\n args = (args,)\n\n for attribute, version_name, message in args:\n if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):\n raise ValueError(\n f\"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'\"\n f\" version {__version__} is >= {version_name}\"\n )\n\n warning = None\n if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:\n values += (deprecated_kwargs.pop(attribute),)\n warning = f\"The `{attribute}` argument is deprecated and will be removed in version {version_name}.\"\n elif hasattr(deprecated_kwargs, attribute):\n values += (getattr(deprecated_kwargs, attribute),)\n warning = f\"The `{attribute}` attribute is deprecated and will be removed in version {version_name}.\"\n elif deprecated_kwargs is None:\n warning = f\"`{attribute}` is deprecated and will be removed in version {version_name}.\"\n\n if warning is not None:\n warning = warning + \" \" if standard_warn else \"\"\n warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)\n\n if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:\n call_frame = inspect.getouterframes(inspect.currentframe())[1]\n filename = call_frame.filename\n line_number = call_frame.lineno\n function = call_frame.function\n key, value = next(iter(deprecated_kwargs.items()))\n raise TypeError(f\"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`\")\n\n if len(values) == 0:\n return\n elif len(values) == 1:\n return values[0]\n return values" }, { "identifier": "randn_tensor", "path": "src/diffusers/utils/torch_utils.py", "snippet": "def randn_tensor(\n shape: Union[Tuple, List],\n generator: Optional[Union[List[\"torch.Generator\"], \"torch.Generator\"]] = None,\n device: Optional[\"torch.device\"] = None,\n dtype: Optional[\"torch.dtype\"] = None,\n layout: Optional[\"torch.layout\"] = None,\n):\n \"\"\"A helper function to create random tensors on the desired `device` with the desired `dtype`. When\n passing a list of generators, you can seed each batch size individually. 
If CPU generators are passed, the tensor\n is always created on the CPU.\n \"\"\"\n # device on which tensor is created defaults to device\n rand_device = device\n batch_size = shape[0]\n\n layout = layout or torch.strided\n device = device or torch.device(\"cpu\")\n\n if generator is not None:\n gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type\n if gen_device_type != device.type and gen_device_type == \"cpu\":\n rand_device = \"cpu\"\n if device != \"mps\":\n logger.info(\n f\"The passed generator was created on 'cpu' even though a tensor on {device} was expected.\"\n f\" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably\"\n f\" slighly speed up this function by passing a generator that was created on the {device} device.\"\n )\n elif gen_device_type != device.type and gen_device_type == \"cuda\":\n raise ValueError(f\"Cannot generate a {device} tensor from a generator of type {gen_device_type}.\")\n\n # make sure generator list of length 1 is treated like a non-list\n if isinstance(generator, list) and len(generator) == 1:\n generator = generator[0]\n\n if isinstance(generator, list):\n shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)\n\n return latents" }, { "identifier": "KarrasDiffusionSchedulers", "path": "src/diffusers/schedulers/scheduling_utils.py", "snippet": "class KarrasDiffusionSchedulers(Enum):\n DDIMScheduler = 1\n DDPMScheduler = 2\n PNDMScheduler = 3\n LMSDiscreteScheduler = 4\n EulerDiscreteScheduler = 5\n HeunDiscreteScheduler = 6\n EulerAncestralDiscreteScheduler = 7\n DPMSolverMultistepScheduler = 8\n DPMSolverSinglestepScheduler = 9\n KDPM2DiscreteScheduler = 10\n KDPM2AncestralDiscreteScheduler = 11\n DEISMultistepScheduler = 12\n UniPCMultistepScheduler = 13\n DPMSolverSDEScheduler = 14" }, { "identifier": "SchedulerMixin", "path": "src/diffusers/schedulers/scheduling_utils.py", "snippet": "class SchedulerMixin(PushToHubMixin):\n \"\"\"\n Base class for all schedulers.\n\n [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving\n functionalities.\n\n [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to\n the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler\n class. 
Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the scheduler\n configuration saved with [`~SchedulerMixin.save_pretrained`].\n subfolder (`str`, *optional*):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`. 
You can also activate the special\n [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a\n firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs, commit_hash = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n return_commit_hash=True,\n **kwargs,\n )\n return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to a directory so that it can be reloaded using the\n [`~SchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes" }, { "identifier": "SchedulerOutput", "path": "src/diffusers/schedulers/scheduling_utils.py", "snippet": "class SchedulerOutput(BaseOutput):\n \"\"\"\n Base class for the output of a scheduler's `step` function.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n \"\"\"\n\n prev_sample: torch.FloatTensor" } ]
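The context snippets above document the `ConfigMixin` base class, the `register_to_config` helper, and the `SchedulerMixin`/`SchedulerOutput` scheduler interface that the target file builds on. As a minimal, hedged sketch of how these pieces compose (the class name `MyScheduler` and its trivial update rule are illustrative only, not part of this record):

```python
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput


class MyScheduler(SchedulerMixin, ConfigMixin):
    """Illustrative skeleton only; not part of the dataset record."""

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000):
        # arguments captured by @register_to_config become readable via
        # self.config, e.g. self.config.num_train_timesteps
        self.timesteps = torch.arange(num_train_timesteps - 1, -1, -1)

    def step(self, model_output: torch.FloatTensor, timestep: int,
             sample: torch.FloatTensor) -> SchedulerOutput:
        # a real scheduler applies its update rule here; this no-op
        # just shows the expected return container
        return SchedulerOutput(prev_sample=sample)
```

A real scheduler such as the one in this record replaces the no-op `step` with its multistep DPM-Solver update.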
import math
import numpy as np
import torch
from typing import List, Optional, Tuple, Union

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import deprecate
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
12,267
elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) return x_t def multistep_dpm_solver_third_order_update( self, model_output_list: List[torch.FloatTensor], *args, sample: torch.FloatTensor = None, **kwargs, ) -> torch.FloatTensor: """ One step for the third-order multistep DPMSolver. Args: model_output_list (`List[torch.FloatTensor]`): The direct outputs from learned diffusion model at current and latter timesteps. sample (`torch.FloatTensor`): A current instance of a sample created by diffusion process. Returns: `torch.FloatTensor`: The sample tensor at the previous timestep. """ timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(" missing`sample` as a required keyward argument") if timestep_list is not None: deprecate( "timestep_list", "1.0.0", "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 r0, r1 = h_0 / h, h_1 / h D0 = m0 D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) if self.config.algorithm_type == "dpmsolver++": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 ) elif self.config.algorithm_type == "dpmsolver": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 ) return x_t def _init_step_index(self, timestep): if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) index_candidates = (self.timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the 
denoising schedule (e.g. for image-to-image) elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() self._step_index = step_index def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True,
# Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): """ `DPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. solver_order (`int`, defaults to 2): The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. 
prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++"`. algorithm_type (`str`, defaults to `dpmsolver++`): Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) paper, and the `dpmsolver++` type implements the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. solver_type (`str`, defaults to `midpoint`): Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. lower_order_final (`bool`, defaults to `True`): Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. euler_at_final (`bool`, defaults to `False`): Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference steps, but sometimes may result in blurring. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. use_lu_lambdas (`bool`, *optional*, defaults to `False`): Whether to use the uniform-logSNR for step sizes proposed by Lu's DPM-Solver in the noise schedule during the sampling process. If `True`, the sigmas and time steps are determined according to a sequence of `lambda(t)`. lambda_min_clipped (`float`, defaults to `-inf`): Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the cosine (`squaredcos_cap_v2`) noise schedule. variance_type (`str`, *optional*): Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output contains the predicted Gaussian variance. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps. You can use a combination of `offset=1` and `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable Diffusion. 
""" _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "dpmsolver++", solver_type: str = "midpoint", lower_order_final: bool = True, euler_at_final: bool = False, use_karras_sigmas: Optional[bool] = False, use_lu_lambdas: Optional[bool] = False, lambda_min_clipped: float = -float("inf"), variance_type: Optional[str] = None, timestep_spacing: str = "linspace", steps_offset: int = 0, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # Currently we only support VP-type noise schedule self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # settings for DPM-Solver if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: if algorithm_type == "deis": self.register_to_config(algorithm_type="dpmsolver++") else: raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}") if solver_type not in ["midpoint", "heun"]: if solver_type in ["logrho", "bh1", "bh2"]: self.register_to_config(solver_type="midpoint") else: raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") # setable values self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None @property def step_index(self): """ The index counter for current timestep. It will increae 1 after each scheduler step. """ return self._step_index def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ # Clipping the minimum of all lambda(t) for numerical stability. # This is critical for cosine (squaredcos_cap_v2) noise schedule. 
clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = last_timestep // (num_inference_steps + 1) # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." ) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) if self.config.use_karras_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) elif self.config.use_lu_lambdas: lambdas = np.flip(log_sigmas.copy()) lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps) sigmas = np.exp(lambdas) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [ None, ] * self.config.solver_order self.lower_order_nums = 0 # add an index counter for schedulers that allow duplicated timesteps self._step_index = None # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(np.maximum(sigma, 1e-10)) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Karras et al. (2022).""" sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def _convert_to_lu(self, in_lambdas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Lu et al. (2022).""" lambda_min: float = in_lambdas[-1].item() lambda_max: float = in_lambdas[0].item() rho = 1.0 # 1.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = lambda_min ** (1 / rho) max_inv_rho = lambda_max ** (1 / rho) lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return lambdas def convert_model_output( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, **kwargs, ) -> torch.FloatTensor: """ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model. 
sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The converted model output. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyward argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) # DPM-Solver++ needs to solve an integral of the data prediction model. if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: if self.config.prediction_type == "epsilon": # DPM-Solver and DPM-Solver++ only need the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: model_output = model_output[:, :3] sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == "sample": x0_pred = model_output elif self.config.prediction_type == "v_prediction": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the DPMSolverMultistepScheduler." ) if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred # DPM-Solver needs to solve an integral of the noise prediction model. elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: if self.config.prediction_type == "epsilon": # DPM-Solver and DPM-Solver++ only need the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == "sample": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == "v_prediction": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the DPMSolverMultistepScheduler." ) if self.config.thresholding: sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def dpm_solver_first_order_update( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, noise: Optional[torch.FloatTensor] = None, **kwargs, ) -> torch.FloatTensor: """ One step for the first-order DPMSolver (equivalent to DDIM). Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The sample tensor at the previous timestep. 
""" timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(" missing `sample` as a required keyward argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == "dpmsolver++": x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output elif self.config.algorithm_type == "dpmsolver": x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output elif self.config.algorithm_type == "sde-dpmsolver++": assert noise is not None x_t = ( (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.algorithm_type == "sde-dpmsolver": assert noise is not None x_t = ( (alpha_t / alpha_s) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) return x_t def multistep_dpm_solver_second_order_update( self, model_output_list: List[torch.FloatTensor], *args, sample: torch.FloatTensor = None, noise: Optional[torch.FloatTensor] = None, **kwargs, ) -> torch.FloatTensor: """ One step for the second-order multistep DPMSolver. Args: model_output_list (`List[torch.FloatTensor]`): The direct outputs from learned diffusion model at current and latter timesteps. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The sample tensor at the previous timestep. 
""" timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(" missing `sample` as a required keyward argument") if timestep_list is not None: deprecate( "timestep_list", "1.0.0", "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s0, sigma_s1 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) m0, m1 = model_output_list[-1], model_output_list[-2] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) if self.config.algorithm_type == "dpmsolver++": # See https://arxiv.org/abs/2211.01095 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 ) elif self.config.algorithm_type == "dpmsolver": # See https://arxiv.org/abs/2206.00927 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 ) elif self.config.algorithm_type == "sde-dpmsolver++": assert noise is not None if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.algorithm_type == "sde-dpmsolver": assert noise is not None if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * (torch.exp(h) - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) return x_t def multistep_dpm_solver_third_order_update( self, model_output_list: List[torch.FloatTensor], *args, sample: torch.FloatTensor = None, **kwargs, ) -> 
torch.FloatTensor: """ One step for the third-order multistep DPMSolver. Args: model_output_list (`List[torch.FloatTensor]`): The direct outputs from learned diffusion model at current and latter timesteps. sample (`torch.FloatTensor`): A current instance of a sample created by diffusion process. Returns: `torch.FloatTensor`: The sample tensor at the previous timestep. """ timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError(" missing`sample` as a required keyward argument") if timestep_list is not None: deprecate( "timestep_list", "1.0.0", "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 r0, r1 = h_0 / h, h_1 / h D0 = m0 D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) if self.config.algorithm_type == "dpmsolver++": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 ) elif self.config.algorithm_type == "dpmsolver": # See https://arxiv.org/abs/2206.00927 for detailed derivations x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 ) return x_t def _init_step_index(self, timestep): if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) index_candidates = (self.timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() self._step_index = step_index def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True,
) -> Union[SchedulerOutput, Tuple]:
6
2023-11-18 01:40:55+00:00
16k
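For reference, the `dpmsolver++` branches of the first- and second-order update functions in the scheduler code above reduce to the following (a restatement in the code's own notation, not an independent derivation; here \alpha_i and \sigma_i come from the noise schedule, \lambda_i = \log\alpha_i - \log\sigma_i, and m_0, m_1 are the most recent stored model outputs):

\begin{aligned}
h &= \lambda_t - \lambda_{s_0}, \qquad h_0 = \lambda_{s_0} - \lambda_{s_1}, \qquad r_0 = h_0 / h, \qquad D_0 = m_0, \qquad D_1 = \tfrac{1}{r_0}\,(m_0 - m_1), \\
\text{first order:}\qquad x_t &= \frac{\sigma_t}{\sigma_{s_0}}\,x_{s_0} \;-\; \alpha_t\,\bigl(e^{-h} - 1\bigr)\,m_0, \\
\text{second order (midpoint):}\qquad x_t &= \frac{\sigma_t}{\sigma_{s_0}}\,x_{s_0} \;-\; \alpha_t\,\bigl(e^{-h} - 1\bigr)\,D_0 \;-\; \tfrac{1}{2}\,\alpha_t\,\bigl(e^{-h} - 1\bigr)\,D_1, \\
\text{second order (Heun):}\qquad x_t &= \frac{\sigma_t}{\sigma_{s_0}}\,x_{s_0} \;-\; \alpha_t\,\bigl(e^{-h} - 1\bigr)\,D_0 \;+\; \alpha_t\!\left(\frac{e^{-h} - 1}{h} + 1\right) D_1.
\end{aligned}

The `sde-dpmsolver++` branches keep the same quantities but rescale the coefficients and add a stochastic term \sigma_t\sqrt{1 - e^{-2h}}\,\epsilon, exactly as written in the code above.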
BAAI-DCAI/SegVol
inference_demo.py
[ { "identifier": "sam_model_registry", "path": "segment_anything_volumetric/build_sam.py", "snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):" }, { "identifier": "SegVol", "path": "network/model.py", "snippet": "class SegVol(nn.Module):\n def __init__(self, \n image_encoder, \n mask_decoder,\n prompt_encoder,\n clip_ckpt,\n roi_size,\n patch_size,\n test_mode=False,\n ):\n super().__init__()\n self.image_encoder = image_encoder\n self.mask_decoder = mask_decoder\n self.prompt_encoder = prompt_encoder\n self.text_encoder = TextEncoder(clip_ckpt)\n self.feat_shape = np.array(roi_size)/np.array(patch_size)\n self.test_mode = test_mode\n self.dice_loss = BinaryDiceLoss().cuda()\n self.bce_loss = BCELoss().cuda()\n self.decoder_iter = 6\n\n def forward(self, image, text=None, boxes=None, points=None, **kwargs):\n bs = image.shape[0]\n img_shape = (image.shape[2], image.shape[3], image.shape[4])\n image_embedding, _ = self.image_encoder(image)\n image_embedding = image_embedding.transpose(1, 2).view(bs, -1, \n int(self.feat_shape[0]), int(self.feat_shape[1]), int(self.feat_shape[2]))\n # test mode\n if self.test_mode:\n return self.forward_decoder(image_embedding, img_shape, text, boxes, points)\n \n # train mode\n ## sl\n sl_loss = self.supervised_forward(image, image_embedding, img_shape, kwargs['train_organs'], kwargs['train_labels'])\n ## ssl\n ssl_loss = self.unsupervised_forward(image, image_embedding, kwargs['pseudo_seg_cleaned'], img_shape)\n return sl_loss, ssl_loss\n\n def forward_decoder(self, image_embedding, img_shape, text=None, boxes=None, points=None):\n with torch.no_grad():\n if boxes is not None:\n if len(boxes.shape) == 2:\n boxes = boxes[:, None, :] # (B, 1, 6)\n if text is not None:\n text_embedding = self.text_encoder(text) # (B, 768)\n else:\n text_embedding = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=None,\n text_embedding=text_embedding,\n )\n\n dense_pe = self.prompt_encoder.get_dense_pe()\n low_res_masks, _ = self.mask_decoder(\n image_embeddings=image_embedding,\n text_embedding = text_embedding,\n image_pe=dense_pe,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=False,\n )\n logits = F.interpolate(low_res_masks, size=img_shape, mode='trilinear', align_corners=False)\n return logits\n\n def supervised_forward(self, image, image_embedding, img_shape, training_organs, train_labels):\n iter_points, iter_bboxes, iter_organs = self.build_prompt_label(image.shape[0], training_organs, train_labels)\n # select prompt\n prompt_options = [[None, iter_points, iter_organs], [iter_bboxes, None, iter_organs], \n [None, None, iter_organs], [iter_bboxes, None, None], [None, iter_points, None],\n [iter_bboxes, iter_points, None]]\n sl_loss = 0\n for prompt in prompt_options:\n bboxes, points, organs = prompt\n logits = self.forward_decoder(image_embedding, img_shape, text=organs, boxes=bboxes, points=points)\n # cal loss\n sl_loss_dice = self.dice_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss_bce = self.bce_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss += sl_loss_dice + sl_loss_bce\n return sl_loss\n \n def unsupervised_forward(self, image, image_embedding, pseudo_seg_cleaned, img_shape):\n sll_loss = 0\n for iter in range(self.decoder_iter):\n if iter % 2 == 0:\n pseudo_labels, 
pseudo_points_prompt = self.build_pseudo_point_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=None, points=pseudo_points_prompt)\n else:\n pseudo_labels, pseudo_bboxes_prompt = self.build_pseudo_box_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=pseudo_bboxes_prompt, points=None)\n # cal loss\n sll_loss_dice = self.dice_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss_bce = self.bce_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss += sll_loss_dice + sll_loss_bce\n return sll_loss\n\n def build_prompt_label(self, bs, training_organs, train_labels):\n # generate prompt & label\n iter_organs = []\n iter_bboxes = []\n iter_points_ax = []\n iter_point_labels = []\n for sample_idx in range(bs):\n # organ prompt\n iter_organs.append(training_organs)\n # box prompt\n box = generate_box(train_labels[sample_idx])\n iter_bboxes.append(box)\n # point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(0, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n point, point_label = select_points(\n train_labels[sample_idx],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n iter_points_ax.append(point)\n iter_point_labels.append(point_label)\n # batched prompt\n iter_points_ax = torch.stack(iter_points_ax, dim=0).cuda()\n iter_point_labels = torch.stack(iter_point_labels, dim=0).cuda()\n iter_points = (iter_points_ax, iter_point_labels)\n iter_bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return iter_points, iter_bboxes, iter_organs\n \n def build_pseudo_point_prompt_label(self, input_shape, seg_labels):\n pseudo_labels = torch.zeros(input_shape).cuda()\n # generate points\n points = []\n point_labels = []\n for batch_idx in range(input_shape[0]):\n # generate pseudo label\n unique_ids = torch.unique(seg_labels[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels[batch_idx]==region_id] = 1\n # generate point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(4, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n assert len(pseudo_labels[batch_idx][0].shape) == 3\n point, point_label = select_points(\n pseudo_labels[batch_idx][0],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n points.append(point)\n point_labels.append(point_label)\n points = torch.stack(points, dim=0).cuda()\n point_labels = torch.stack(point_labels, dim=0).cuda()\n pseudo_points_prompt = (points, point_labels)\n return pseudo_labels, pseudo_points_prompt\n\n def build_pseudo_box_prompt_label(self, input_shape, seg_labels_cleaned):\n pseudo_labels = torch.zeros(input_shape).cuda()\n iter_bboxes = []\n # generate boxes\n for batch_idx in range(input_shape[0]):\n # generate ori pseudo label\n unique_ids = torch.unique(seg_labels_cleaned[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==region_id] = 1\n # generate box prompt\n box = 
generate_box(pseudo_labels[batch_idx][0])\n iter_bboxes.append(box)\n # refine pseudo label\n x_min, y_min, z_min, x_max, y_max, z_max = box\n binary_cube = torch.zeros_like(pseudo_labels[batch_idx][0]).int()\n binary_cube[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1] = 1\n # cal iou\n mask_label = seg_labels_cleaned[batch_idx][0]\n assert binary_cube.shape == mask_label.shape, str(binary_cube.shape) + ' ' + str(mask_label.shape)\n mask_values_in_binary_cube = mask_label[binary_cube == 1]\n unique_mask_values = torch.unique(mask_values_in_binary_cube)\n # print('unique_mask_values ', unique_mask_values)\n for value in unique_mask_values:\n if value == -1: continue\n mask_area = (mask_label == value)\n intersection = (binary_cube & mask_area)\n iou = intersection.float().sum() / mask_area.float().sum()\n if iou > 0.90:\n # print(f\"Mask value {value} has IOU > 0.90 in binary cube.\")\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==value] = 1\n\n bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return pseudo_labels, bboxes" }, { "identifier": "process_ct_gt", "path": "data_process/demo_data_process.py", "snippet": "def process_ct_gt(case_path, label_path, category, spatial_size):\n print('Data preprocessing...')\n # transform\n img_loader = transforms.LoadImage()\n transform = transforms.Compose(\n [\n transforms.Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ForegroundNormalization(keys=[\"image\"]),\n DimTranspose(keys=[\"image\", \"label\"]),\n MinMaxNormalization(),\n transforms.SpatialPadd(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='constant'),\n transforms.CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n transforms.ToTensord(keys=[\"image\", \"label\"]),\n ]\n )\n zoom_out_transform = transforms.Resized(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='nearest-exact')\n\n ###\n item = {}\n # generate ct_voxel_ndarray\n ct_voxel_ndarray, _ = img_loader(case_path)\n print(type(ct_voxel_ndarray))\n ct_voxel_ndarray = np.array(ct_voxel_ndarray).squeeze()\n ct_shape = ct_voxel_ndarray.shape\n ct_voxel_ndarray = np.expand_dims(ct_voxel_ndarray, axis=0)\n item['image'] = ct_voxel_ndarray\n\n # generate gt_voxel_ndarray\n gt_voxel_ndarray, _ = img_loader(label_path)\n gt_voxel_ndarray = np.array(gt_voxel_ndarray)\n present_categories = np.unique(gt_voxel_ndarray)\n gt_masks = []\n for cls_idx in range(len(category)):\n # ignore background\n cls = cls_idx + 1\n if cls not in present_categories:\n gt_voxel_ndarray_category = np.zeros(ct_shape)\n gt_masks.append(gt_voxel_ndarray_category)\n else:\n gt_voxel_ndarray_category = gt_voxel_ndarray.copy()\n gt_voxel_ndarray_category[gt_voxel_ndarray != cls] = 0\n gt_voxel_ndarray_category[gt_voxel_ndarray == cls] = 1\n gt_masks.append(gt_voxel_ndarray_category)\n gt_voxel_ndarray = np.stack(gt_masks, axis=0)\n assert gt_voxel_ndarray.shape[0] == len(category) and gt_voxel_ndarray.shape[1:] == ct_voxel_ndarray.shape[1:]\n item['label'] = gt_voxel_ndarray.astype(np.int32)\n\n # transform\n item = transform(item)\n item_zoom_out = zoom_out_transform(item)\n item['zoom_out_image'] = item_zoom_out['image']\n item['zoom_out_label'] = item_zoom_out['label']\n print( 'Zoom_in image shape: ', item['image'].shape, \n '\\nZoom_in label shape: ', item['label'].shape,\n '\\nZoom_out image shape: ', item['zoom_out_image'].shape,\n '\\nZoom_out label shape: ', item['zoom_out_label'].shape,\n )\n return item" }, { "identifier": "sliding_window_inference", "path": 
"utils/monai_inferers_utils.py", "snippet": "def sliding_window_inference(\n inputs: torch.Tensor,\n prompt_reflection: Union[torch.Tensor, Tuple[torch.Tensor, ...]],\n roi_size: Union[Sequence[int], int],\n sw_batch_size: int,\n predictor: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor], Dict[Any, torch.Tensor]]],\n overlap: float = 0.25,\n mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n sigma_scale: Union[Sequence[float], float] = 0.125,\n padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,\n cval: float = 0.0,\n sw_device: Union[torch.device, str, None] = None,\n device: Union[torch.device, str, None] = None,\n progress: bool = False,\n roi_weight_map: Union[torch.Tensor, None] = None,\n *args: Any,\n **kwargs: Any,\n) -> Union[torch.Tensor, Tuple[torch.Tensor, ...], Dict[Any, torch.Tensor]]:\n \"\"\"\n Sliding window inference on `inputs` with `predictor`.\n\n The outputs of `predictor` could be a tensor, a tuple, or a dictionary of tensors.\n Each output in the tuple or dict value is allowed to have different resolutions with respect to the input.\n e.g., the input patch spatial size is [128,128,128], the output (a tuple of two patches) patch sizes\n could be ([128,64,256], [64,32,128]).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen to ensure the output ROI is still\n an integer. If the predictor's input and output spatial sizes are not equal, we recommend choosing the parameters\n so that `overlap*roi_size*output_size/input_size` is an integer (for each spatial dimension).\n\n When roi_size is larger than the inputs' spatial size, the input image are padded during inference.\n To maintain the same spatial sizes, the output image will be cropped to the original input size.\n\n Args:\n inputs: input image to be processed (assuming NCHW[D])\n roi_size: the spatial window size for inferences.\n When its components have None or non-positives, the corresponding inputs dimension will be used.\n if the components of the `roi_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n sw_batch_size: the batch size to run window slices.\n predictor: given input tensor ``patch_data`` in shape NCHW[D],\n The outputs of the function call ``predictor(patch_data)`` should be a tensor, a tuple, or a dictionary\n with Tensor values. Each output in the tuple or dict value should have the same batch_size, i.e. NM'H'W'[D'];\n where H'W'[D'] represents the output patch's spatial size, M is the number of output channels,\n N is `sw_batch_size`, e.g., the input shape is (7, 1, 128,128,128),\n the output could be a tuple of two tensors, with shapes: ((7, 5, 128, 64, 256), (7, 4, 64, 32, 128)).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen\n to ensure the scaled output ROI sizes are still integers.\n If the `predictor`'s input and output spatial sizes are different,\n we recommend choosing the parameters so that ``overlap*roi_size*zoom_scale`` is an integer for each dimension.\n overlap: Amount of overlap between scans.\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. 
Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``\"gaussian\"``.\n Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.\n When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding\n spatial dimensions.\n padding_mode: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}\n Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``\"constant\"``\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n cval: fill value for 'constant' padding mode. Default: 0\n sw_device: device for the window data.\n By default the device (and accordingly the memory) of the `inputs` is used.\n Normally `sw_device` should be consistent with the device where `predictor` is defined.\n device: device for the stitched output prediction.\n By default the device (and accordingly the memory) of the `inputs` is used. If for example\n set to device=torch.device('cpu') the gpu memory consumption is less and independent of the\n `inputs` and `roi_size`. Output is on the `device`.\n progress: whether to print a `tqdm` progress bar.\n roi_weight_map: pre-computed (non-negative) weight map for each ROI.\n If not given, and ``mode`` is not `constant`, this map will be computed on the fly.\n args: optional args to be passed to ``predictor``.\n kwargs: optional keyword args to be passed to ``predictor``.\n\n Note:\n - input must be channel-first and have a batch dim, supports N-D sliding window.\n\n \"\"\"\n print('sliding window inference for ROI')\n text = kwargs['text']\n use_box = kwargs['use_box']\n use_point = kwargs['use_point']\n assert not (use_box and use_point)\n compute_dtype = inputs.dtype\n num_spatial_dims = len(inputs.shape) - 2\n if overlap < 0 or overlap >= 1:\n raise ValueError(\"overlap must be >= 0 and < 1.\")\n\n # determine image spatial size and batch size\n # Note: all input images must have the same image size and batch size\n batch_size, _, *image_size_ = inputs.shape\n\n if device is None:\n device = inputs.device\n if sw_device is None:\n sw_device = inputs.device\n\n roi_size = fall_back_tuple(roi_size, image_size_)\n # in case that image size is smaller than roi size\n image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))\n pad_size = []\n for k in range(len(inputs.shape) - 1, 1, -1):\n diff = max(roi_size[k - 2] - inputs.shape[k], 0)\n half = diff // 2\n pad_size.extend([half, diff - half])\n inputs = F.pad(inputs, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n if use_point or use_box:\n binary_prompt_map, global_preds = prompt_reflection\n global_preds = F.pad(global_preds, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)\n\n # Store all slices in list\n slices = dense_patch_slices(image_size, roi_size, scan_interval)\n num_win = len(slices) # number of windows per image\n total_slices = num_win * batch_size # total number of windows\n\n # Create window-level importance map\n valid_patch_size = get_valid_patch_size(image_size, roi_size)\n if valid_patch_size == roi_size and (roi_weight_map is not None):\n importance_map = roi_weight_map\n 
else:\n try:\n importance_map = compute_importance_map(valid_patch_size, mode=mode, sigma_scale=sigma_scale, device=device)\n except BaseException as e:\n raise RuntimeError(\n \"Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'.\"\n ) from e\n importance_map = convert_data_type(importance_map, torch.Tensor, device, compute_dtype)[0] # type: ignore\n # handle non-positive weights\n min_non_zero = max(importance_map[importance_map != 0].min().item(), 1e-3)\n importance_map = torch.clamp(importance_map.to(torch.float32), min=min_non_zero).to(compute_dtype)\n\n # Perform predictions\n dict_key, output_image_list, count_map_list = None, [], []\n _initialized_ss = -1\n is_tensor_output = True # whether the predictor's output is a tensor (instead of dict/tuple)\n\n # for each patch\n for slice_g in tqdm(range(0, total_slices, sw_batch_size)) if progress else range(0, total_slices, sw_batch_size):\n slice_range = range(slice_g, min(slice_g + sw_batch_size, total_slices))\n unravel_slice = [\n [slice(int(idx / num_win), int(idx / num_win) + 1), slice(None)] + list(slices[idx % num_win])\n for idx in slice_range\n ]\n window_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n #############\n \n boxes = None\n points = None\n if use_point:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n point, point_label = select_points(window_binary_prompt_map.squeeze())\n points = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) \n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n if use_box:\n if num_win == 1:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(window_binary_prompt_map.squeeze()).unsqueeze(0).float().cuda()\n else:\n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n seg_prob_out = predictor(window_data, text, boxes, points) # batched patch segmentation\n #############\n # convert seg_prob_out to tuple seg_prob_tuple, this does not allocate new memory.\n seg_prob_tuple: Tuple[torch.Tensor, ...]\n if isinstance(seg_prob_out, torch.Tensor):\n seg_prob_tuple = (seg_prob_out,)\n elif isinstance(seg_prob_out, Mapping):\n if dict_key is None:\n dict_key = sorted(seg_prob_out.keys()) # track predictor's output keys\n seg_prob_tuple = tuple(seg_prob_out[k] for k in dict_key)\n is_tensor_output = False\n else:\n seg_prob_tuple = ensure_tuple(seg_prob_out)\n is_tensor_output = False\n\n # for each output in multi-output list\n for ss, seg_prob in enumerate(seg_prob_tuple):\n seg_prob = seg_prob.to(device) # BxCxMxNxP or BxCxMxN\n\n # compute zoom scale: out_roi_size/in_roi_size\n zoom_scale = []\n for axis, (img_s_i, out_w_i, in_w_i) in enumerate(\n zip(image_size, seg_prob.shape[2:], window_data.shape[2:])\n ):\n _scale = out_w_i / float(in_w_i)\n if not (img_s_i * _scale).is_integer():\n warnings.warn(\n f\"For spatial axis: {axis}, output[{ss}] will have non-integer shape. Spatial \"\n f\"zoom_scale between output[{ss}] and input is {_scale}. Please pad inputs.\"\n )\n zoom_scale.append(_scale)\n\n if _initialized_ss < ss: # init. 
the ss-th buffer at the first iteration\n # construct multi-resolution outputs\n output_classes = seg_prob.shape[1]\n output_shape = [batch_size, output_classes] + [\n int(image_size_d * zoom_scale_d) for image_size_d, zoom_scale_d in zip(image_size, zoom_scale)\n ]\n # allocate memory to store the full output and the count for overlapping parts\n output_image_list.append(torch.zeros(output_shape, dtype=compute_dtype, device=device))\n count_map_list.append(torch.zeros([1, 1] + output_shape[2:], dtype=compute_dtype, device=device))\n _initialized_ss += 1\n\n # resizing the importance_map\n resizer = Resize(spatial_size=seg_prob.shape[2:], mode=\"nearest\", anti_aliasing=False)\n\n # store the result in the proper location of the full output. Apply weights from importance map.\n for idx, original_idx in zip(slice_range, unravel_slice):\n # zoom roi\n original_idx_zoom = list(original_idx) # 4D for 2D image, 5D for 3D image\n for axis in range(2, len(original_idx_zoom)):\n zoomed_start = original_idx[axis].start * zoom_scale[axis - 2]\n zoomed_end = original_idx[axis].stop * zoom_scale[axis - 2]\n if not zoomed_start.is_integer() or (not zoomed_end.is_integer()):\n warnings.warn(\n f\"For axis-{axis-2} of output[{ss}], the output roi range is not int. \"\n f\"Input roi range is ({original_idx[axis].start}, {original_idx[axis].stop}). \"\n f\"Spatial zoom_scale between output[{ss}] and input is {zoom_scale[axis - 2]}. \"\n f\"Corresponding output roi range is ({zoomed_start}, {zoomed_end}).\\n\"\n f\"Please change overlap ({overlap}) or roi_size ({roi_size[axis-2]}) for axis-{axis-2}. \"\n \"Tips: if overlap*roi_size*zoom_scale is an integer, it usually works.\"\n )\n original_idx_zoom[axis] = slice(int(zoomed_start), int(zoomed_end), None)\n importance_map_zoom = resizer(importance_map.unsqueeze(0))[0].to(compute_dtype)\n # store results and weights\n output_image_list[ss][original_idx_zoom] += importance_map_zoom * seg_prob[idx - slice_g]\n count_map_list[ss][original_idx_zoom] += (\n importance_map_zoom.unsqueeze(0).unsqueeze(0).expand(count_map_list[ss][original_idx_zoom].shape)\n )\n\n # account for any overlapping sections\n for ss in range(len(output_image_list)):\n output_image_list[ss] = (output_image_list[ss] / count_map_list.pop(0)).to(compute_dtype)\n\n # remove padding if image_size smaller than roi_size\n for ss, output_i in enumerate(output_image_list):\n if torch.isnan(output_i).any() or torch.isinf(output_i).any():\n warnings.warn(\"Sliding window inference results contain NaN or Inf.\")\n\n zoom_scale = [\n seg_prob_map_shape_d / roi_size_d for seg_prob_map_shape_d, roi_size_d in zip(output_i.shape[2:], roi_size)\n ]\n\n final_slicing: List[slice] = []\n for sp in range(num_spatial_dims):\n slice_dim = slice(pad_size[sp * 2], image_size_[num_spatial_dims - sp - 1] + pad_size[sp * 2])\n slice_dim = slice(\n int(round(slice_dim.start * zoom_scale[num_spatial_dims - sp - 1])),\n int(round(slice_dim.stop * zoom_scale[num_spatial_dims - sp - 1])),\n )\n final_slicing.insert(0, slice_dim)\n while len(final_slicing) < len(output_i.shape):\n final_slicing.insert(0, slice(None))\n output_image_list[ss] = output_i[final_slicing]\n\n if dict_key is not None: # if output of predictor is a dict\n final_output = dict(zip(dict_key, output_image_list))\n else:\n final_output = tuple(output_image_list) # type: ignore\n return final_output[0] if is_tensor_output else final_output # type: ignore" }, { "identifier": "generate_box", "path": "utils/monai_inferers_utils.py", "snippet": "def 
generate_box(pred_pre, bbox_shift=None):\n meaning_post_label = pred_pre # [h, w, d]\n ones_idx = (meaning_post_label > 0).nonzero(as_tuple=True)\n if all(tensor.nelement() == 0 for tensor in ones_idx):\n bboxes = torch.tensor([-1,-1,-1,-1,-1,-1])\n # print(bboxes, bboxes.shape)\n return bboxes\n min_coords = [dim.min() for dim in ones_idx] # [x_min, y_min, z_min]\n max_coords = [dim.max() for dim in ones_idx] # [x_max, y_max, z_max]\n\n\n if bbox_shift is None:\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor)\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor)\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)\n else:\n # add perturbation to bounding box coordinates\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor + random.randint(-bbox_shift, bbox_shift))\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor + random.randint(-bbox_shift, bbox_shift))\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)" }, { "identifier": "select_points", "path": "utils/monai_inferers_utils.py", "snippet": "def select_points(preds, num_positive_extra=4, num_negative_extra=0, fix_extra_point_num=None):\n spacial_dim = 3\n points = torch.zeros((0, 3))\n labels = torch.zeros((0))\n pos_thred = 0.9\n neg_thred = 0.1\n \n # get pos/net indices\n positive_indices = torch.nonzero(preds > pos_thred, as_tuple=True) # ([pos x], [pos y], [pos z])\n negative_indices = torch.nonzero(preds < neg_thred, as_tuple=True)\n\n ones_idx = (preds > pos_thred).nonzero(as_tuple=True)\n if all(tmp.nelement() == 0 for tmp in ones_idx):\n # all neg\n num_positive_extra = 0\n selected_positive_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n else:\n # random select a pos point\n random_idx = torch.randint(len(positive_indices[0]), (1,))\n selected_positive_point = torch.tensor([positive_indices[i][random_idx] for i in range(spacial_dim)]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.ones((1))))\n\n if num_positive_extra > 0:\n pos_idx_list = torch.randperm(len(positive_indices[0]))[:num_positive_extra]\n extra_positive_points = []\n for pos_idx in pos_idx_list:\n extra_positive_points.append([positive_indices[i][pos_idx] for i in range(spacial_dim)])\n extra_positive_points = torch.tensor(extra_positive_points).reshape(-1, 3)\n points = torch.cat((points, extra_positive_points), dim=0)\n labels = torch.cat((labels, torch.ones((extra_positive_points.shape[0]))))\n\n if num_negative_extra > 0:\n neg_idx_list = torch.randperm(len(negative_indices[0]))[:num_negative_extra]\n extra_negative_points = []\n for neg_idx in neg_idx_list:\n extra_negative_points.append([negative_indices[i][neg_idx] for i in range(spacial_dim)])\n extra_negative_points = torch.tensor(extra_negative_points).reshape(-1, 3)\n points = torch.cat((points, extra_negative_points), dim=0)\n labels = torch.cat((labels, torch.zeros((extra_negative_points.shape[0]))))\n # print('extra_negative_points ', extra_negative_points, 
extra_negative_points.shape)\n # print('==> points ', points.shape, labels)\n \n if fix_extra_point_num is None:\n left_point_num = num_positive_extra + num_negative_extra + 1 - labels.shape[0]\n else:\n left_point_num = fix_extra_point_num + 1 - labels.shape[0]\n\n for _ in range(left_point_num):\n ignore_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, ignore_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n\n return (points, labels)" }, { "identifier": "build_binary_cube", "path": "utils/monai_inferers_utils.py", "snippet": "def build_binary_cube(bbox, binary_cube_shape):\n min_coord = bbox[0][:3].int().tolist()\n max_coord = bbox[0][3:].int().tolist()\n binary_cube = torch.zeros(binary_cube_shape)\n binary_cube[min_coord[0]:max_coord[0]+1, min_coord[1]:max_coord[1]+1, min_coord[2]:max_coord[2]+1] = 1\n return binary_cube" }, { "identifier": "build_binary_points", "path": "utils/monai_inferers_utils.py", "snippet": "def build_binary_points(points, labels, shape):\n binary_points = torch.zeros(shape, dtype=torch.int16)\n binary_points[points[labels == 1, 0].long(), points[labels == 1, 1].long(), points[labels == 1, 2].long()] = 1\n return binary_points" }, { "identifier": "logits2roi_coor", "path": "utils/monai_inferers_utils.py", "snippet": "def logits2roi_coor(spatial_size, logits_global_single):\n # crop predict\n pred_global_single = torch.sigmoid(logits_global_single) > 0.5\n ## get all pos idx\n nonzero_indices = torch.nonzero(pred_global_single)\n if nonzero_indices.shape[0] == 0:\n return None, None, None, None, None, None\n ## get boundary\n min_d, max_d = nonzero_indices[:, 0].min(), nonzero_indices[:, 0].max()\n min_h, max_h = nonzero_indices[:, 1].min(), nonzero_indices[:, 1].max()\n min_w, max_w = nonzero_indices[:, 2].min(), nonzero_indices[:, 2].max()\n ## padding\n crop_d, crop_h, crop_w = max_d - min_d + 1, max_h - min_h + 1, max_w - min_w + 1,\n window_d, window_h, window_w = spatial_size\n padding_d, padding_h, padding_w = max(0, window_d-crop_d), max(0, window_h-crop_h), max(0, window_w-crop_w)\n global_d, global_h, global_w = logits_global_single.shape\n min_d = max(0, min_d - int(padding_d)//2)\n min_h = max(0, min_h - int(padding_h)//2)\n min_w = max(0, min_w - int(padding_w)//2)\n max_d = min(global_d, max_d + int(padding_d)//2)\n max_h = min(global_h, max_h + int(padding_h)//2)\n max_w = min(global_w, max_w + int(padding_w)//2)\n return min_d, min_h, min_w, max_d, max_h, max_w" }, { "identifier": "draw_result", "path": "utils/visualize.py", "snippet": "def draw_result(category, image, bboxes, points, logits, gt3D, spatial_size, work_dir):\n zoom_out_transform = transforms.Compose([\n transforms.AddChanneld(keys=[\"image\", \"label\", \"logits\"]),\n transforms.Resized(keys=[\"image\", \"label\", \"logits\"], spatial_size=spatial_size, mode='nearest-exact')\n ])\n post_item = zoom_out_transform({\n 'image': image,\n 'label': gt3D,\n 'logits': logits\n })\n image, gt3D, logits = post_item['image'][0], post_item['label'][0], post_item['logits'][0]\n preds = torch.sigmoid(logits)\n preds = (preds > 0.5).int()\n\n root_dir=os.path.join(work_dir, f'fig_examples/{category}/') \n\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n if bboxes is not None:\n x1, y1, z1, x2, y2, z2 = bboxes[0].cpu().numpy()\n if points is not None:\n points = (points[0].cpu().numpy(), points[1].cpu().numpy())\n points_ax = points[0][0] # [n, 3]\n points_label = points[1][0] # [n]\n\n for j in range(image.shape[0]):\n img_2d 
= image[j, :, :].detach().cpu().numpy()\n preds_2d = preds[j, :, :].detach().cpu().numpy()\n label_2d = gt3D[j, :, :].detach().cpu().numpy()\n if np.sum(label_2d) == 0 or np.sum(preds_2d) == 0:\n continue\n\n img_2d = img_2d * 255\n # orginal img\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.imshow(img_2d, cmap='gray')\n ax1.set_title('Image with prompt') \n ax1.axis('off')\n\n # gt\n ax2.imshow(img_2d, cmap='gray')\n show_mask(label_2d, ax2)\n ax2.set_title('Ground truth') \n ax2.axis('off')\n\n # preds\n ax3.imshow(img_2d, cmap='gray')\n show_mask(preds_2d, ax3)\n ax3.set_title('Prediction') \n ax3.axis('off')\n\n # boxes\n if bboxes is not None:\n if j >= x1 and j <= x2:\n show_box((z1, y1, z2, y2), ax1)\n # points\n if points is not None:\n for point_idx in range(points_label.shape[0]):\n point = points_ax[point_idx]\n label = points_label[point_idx] # [1]\n if j == point[0]:\n show_points(point, label, ax1)\n \n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)\n plt.savefig(os.path.join(root_dir, f'{category}_{j}.png'), bbox_inches='tight')\n plt.close()" } ]
import argparse
import os
import torch
import torch.nn.functional as F
import json
import monai.transforms as transforms
from segment_anything_volumetric import sam_model_registry
from network.model import SegVol
from data_process.demo_data_process import process_ct_gt
from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor
from utils.visualize import draw_result
10965
parser.add_argument("--resume", type = str, default = '') parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap") parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple) parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple) parser.add_argument('-work_dir', type=str, default='./work_dir') ### demo parser.add_argument('--demo_config', type=str, required=True) parser.add_argument("--clip_ckpt", type = str, default = './config/clip') args = parser.parse_args() return args def dice_score(preds, labels): # on GPU assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape) predict = preds.view(1, -1) target = labels.view(1, -1) if target.shape[1] < 1e8: predict = predict.cuda() target = target.cuda() predict = torch.sigmoid(predict) predict = torch.where(predict > 0.5, 1., 0.) tp = torch.sum(torch.mul(predict, target)) den = torch.sum(predict) + torch.sum(target) + 1 dice = 2 * tp / den if target.shape[1] < 1e8: predict = predict.cpu() target = target.cpu() return dice def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None): logits_labels_record = {} image_single_resize = image_resize image_single = image[0,0] ori_shape = image_single.shape for item_idx in range(len(categories)): # get label to generate prompts label_single = gt3D[0][item_idx] label_single_resize = gt3D_resize[0][item_idx] # skip meaningless categories if torch.sum(label_single) == 0: print('No object, skip') continue # generate prompts text_single = categories[item_idx] if args.use_text_prompt else None if categories is not None: print(f'inference |{categories[item_idx]}| target...') points_single = None box_single = None if args.use_point_prompt: point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3) points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape) if args.use_box_prompt: box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) #################### # zoom-out inference: print('--- zoom out inference ---') print(f'use text-prompt [{text_single!=None}], use box-prompt [{box_single!=None}], use point-prompt [{points_single!=None}]') with torch.no_grad(): logits_global_single = segvol_model(image_single_resize.cuda(), text=text_single, boxes=box_single, points=points_single) # resize back global logits logits_global_single = F.interpolate( logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = 
logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad():
def set_parse(): # %% set up parser parser = argparse.ArgumentParser() parser.add_argument("--test_mode", default=True, type=bool) parser.add_argument("--resume", type = str, default = '') parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap") parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple) parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple) parser.add_argument('-work_dir', type=str, default='./work_dir') ### demo parser.add_argument('--demo_config', type=str, required=True) parser.add_argument("--clip_ckpt", type = str, default = './config/clip') args = parser.parse_args() return args def dice_score(preds, labels): # on GPU assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape) predict = preds.view(1, -1) target = labels.view(1, -1) if target.shape[1] < 1e8: predict = predict.cuda() target = target.cuda() predict = torch.sigmoid(predict) predict = torch.where(predict > 0.5, 1., 0.) tp = torch.sum(torch.mul(predict, target)) den = torch.sum(predict) + torch.sum(target) + 1 dice = 2 * tp / den if target.shape[1] < 1e8: predict = predict.cpu() target = target.cpu() return dice def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None): logits_labels_record = {} image_single_resize = image_resize image_single = image[0,0] ori_shape = image_single.shape for item_idx in range(len(categories)): # get label to generate prompts label_single = gt3D[0][item_idx] label_single_resize = gt3D_resize[0][item_idx] # skip meaningless categories if torch.sum(label_single) == 0: print('No object, skip') continue # generate prompts text_single = categories[item_idx] if args.use_text_prompt else None if categories is not None: print(f'inference |{categories[item_idx]}| target...') points_single = None box_single = None if args.use_point_prompt: point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3) points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape) if args.use_box_prompt: box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) #################### # zoom-out inference: print('--- zoom out inference ---') print(f'use text-prompt [{text_single!=None}], use box-prompt [{box_single!=None}], use point-prompt [{points_single!=None}]') with torch.no_grad(): logits_global_single = segvol_model(image_single_resize.cuda(), text=text_single, boxes=box_single, points=points_single) # resize back global logits logits_global_single = F.interpolate( logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not 
args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad():
logits_single_cropped = sliding_window_inference(
3
2023-11-10 08:25:37+00:00
16k
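For orientation, a minimal sketch of how the zoom-out / zoom-in pieces of inference_demo.py shown above fit together for a single text-prompted category, assuming only the function signatures from the context snippets and the import block. The driver function name and the shape handling are illustrative, and the final paste-back of the zoom-in logits into the global map is an assumption, since that part of the file is not visible here.

import torch
import torch.nn.functional as F
from utils.monai_inferers_utils import sliding_window_inference, logits2roi_coor

def run_text_prompted_inference(segvol_model, item, category,
                                spatial_size=(32, 256, 256), overlap=0.5):
    # item comes from process_ct_gt(); shapes below are illustrative
    image = item['image'].unsqueeze(0)                  # (1, 1, D, H, W) full-resolution CT
    image_resize = item['zoom_out_image'].unsqueeze(0)  # (1, 1, *spatial_size) zoom-out volume
    ori_shape = image[0, 0].shape

    # 1) zoom-out: one forward pass on the resized volume, text prompt only
    with torch.no_grad():
        logits_resize = segvol_model(image_resize.cuda(), text=category, boxes=None, points=None)
    logits_global = F.interpolate(logits_resize.cpu(), size=ori_shape, mode='nearest')[0][0]

    # 2) locate a ROI around the coarse foreground prediction
    min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(spatial_size, logits_global)
    if min_d is None:
        return logits_global  # no foreground detected; keep the zoom-out result

    # 3) zoom-in: sliding-window inference over the cropped full-resolution ROI
    image_crop = image[0, 0][min_d:max_d + 1, min_h:max_h + 1, min_w:max_w + 1][None, None]
    with torch.no_grad():
        logits_crop = sliding_window_inference(
            image_crop.cuda(), None, spatial_size, 1, segvol_model,
            overlap=overlap, text=category, use_box=False, use_point=False)

    # 4) merge the refined ROI logits back into the global map (assumed post-processing)
    logits_global[min_d:max_d + 1, min_h:max_h + 1, min_w:max_w + 1] = logits_crop.cpu()[0, 0]
    return logits_global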
theroyallab/tabbyAPI
main.py
[ { "identifier": "convert_args_to_dict", "path": "args.py", "snippet": "def convert_args_to_dict(args: argparse.Namespace, parser: argparse.ArgumentParser):\n \"\"\"Broad conversion of surface level arg groups to dictionaries\"\"\"\n\n arg_groups = {}\n for group in parser._action_groups:\n group_dict = {}\n for arg in group._group_actions:\n value = getattr(args, arg.dest, None)\n if value is not None:\n group_dict[arg.dest] = value\n\n arg_groups[group.title] = group_dict\n\n return arg_groups" }, { "identifier": "init_argparser", "path": "args.py", "snippet": "def init_argparser():\n \"\"\"Creates an argument parser that any function can use\"\"\"\n\n parser = argparse.ArgumentParser(\n epilog=\"These args are only for a subset of the config. \"\n + \"Please edit config.yml for all options!\"\n )\n add_network_args(parser)\n add_model_args(parser)\n add_logging_args(parser)\n add_config_args(parser)\n\n return parser" }, { "identifier": "check_admin_key", "path": "auth.py", "snippet": "def check_admin_key(x_admin_key: str = Header(None), authorization: str = Header(None)):\n \"\"\"Check if the admin key is valid.\"\"\"\n\n # Allow request if auth is disabled\n if DISABLE_AUTH:\n return\n\n if x_admin_key:\n if not AUTH_KEYS.verify_key(x_admin_key, \"admin_key\"):\n raise HTTPException(401, \"Invalid admin key\")\n return x_admin_key\n\n if authorization:\n split_key = authorization.split(\" \")\n if len(split_key) < 2:\n raise HTTPException(401, \"Invalid admin key\")\n if split_key[0].lower() != \"bearer\" or not AUTH_KEYS.verify_key(\n split_key[1], \"admin_key\"\n ):\n raise HTTPException(401, \"Invalid admin key\")\n return authorization\n\n raise HTTPException(401, \"Please provide an admin key\")" }, { "identifier": "check_api_key", "path": "auth.py", "snippet": "def check_api_key(x_api_key: str = Header(None), authorization: str = Header(None)):\n \"\"\"Check if the API key is valid.\"\"\"\n\n # Allow request if auth is disabled\n if DISABLE_AUTH:\n return\n\n if x_api_key:\n if not AUTH_KEYS.verify_key(x_api_key, \"api_key\"):\n raise HTTPException(401, \"Invalid API key\")\n return x_api_key\n\n if authorization:\n split_key = authorization.split(\" \")\n if len(split_key) < 2:\n raise HTTPException(401, \"Invalid API key\")\n if split_key[0].lower() != \"bearer\" or not AUTH_KEYS.verify_key(\n split_key[1], \"api_key\"\n ):\n raise HTTPException(401, \"Invalid API key\")\n\n return authorization\n\n raise HTTPException(401, \"Please provide an API key\")" }, { "identifier": "load_auth_keys", "path": "auth.py", "snippet": "def load_auth_keys(disable_from_config: bool):\n \"\"\"Load the authentication keys from api_tokens.yml. If the file does not\n exist, generate new keys and save them to api_tokens.yml.\"\"\"\n global AUTH_KEYS\n global DISABLE_AUTH\n\n DISABLE_AUTH = disable_from_config\n if disable_from_config:\n logger.warning(\n \"Disabling authentication makes your instance vulnerable. 
\"\n \"Set the `disable_auth` flag to False in config.yml if you \"\n \"want to share this instance with others.\"\n )\n\n return\n\n try:\n with open(\"api_tokens.yml\", \"r\", encoding=\"utf8\") as auth_file:\n auth_keys_dict = yaml.safe_load(auth_file)\n AUTH_KEYS = AuthKeys.model_validate(auth_keys_dict)\n except OSError:\n new_auth_keys = AuthKeys(\n api_key=secrets.token_hex(16), admin_key=secrets.token_hex(16)\n )\n AUTH_KEYS = new_auth_keys\n\n with open(\"api_tokens.yml\", \"w\", encoding=\"utf8\") as auth_file:\n yaml.safe_dump(AUTH_KEYS.model_dump(), auth_file, default_flow_style=False)\n\n logger.info(\n f\"Your API key is: {AUTH_KEYS.api_key}\\n\"\n f\"Your admin key is: {AUTH_KEYS.admin_key}\\n\\n\"\n \"If these keys get compromised, make sure to delete api_tokens.yml \"\n \"and restart the server. Have fun!\"\n )" }, { "identifier": "override_config_from_args", "path": "config.py", "snippet": "def override_config_from_args(args: dict):\n \"\"\"Overrides the config based on a dict representation of args\"\"\"\n\n config_override = unwrap(args.get(\"options\", {}).get(\"config\"))\n if config_override:\n logger.info(\"Attempting to override config.yml from args.\")\n read_config_from_file(pathlib.Path(config_override))\n return\n\n # Network config\n network_override = args.get(\"network\")\n if network_override:\n network_config = get_network_config()\n GLOBAL_CONFIG[\"network\"] = {**network_config, **network_override}\n\n # Model config\n model_override = args.get(\"model\")\n if model_override:\n model_config = get_model_config()\n GLOBAL_CONFIG[\"model\"] = {**model_config, **model_override}\n\n # Logging config\n logging_override = args.get(\"logging\")\n if logging_override:\n logging_config = get_gen_logging_config()\n GLOBAL_CONFIG[\"logging\"] = {\n **logging_config,\n **{k.replace(\"log_\", \"\"): logging_override[k] for k in logging_override},\n }" }, { "identifier": "read_config_from_file", "path": "config.py", "snippet": "def read_config_from_file(config_path: pathlib.Path):\n \"\"\"Sets the global config from a given file path\"\"\"\n global GLOBAL_CONFIG\n\n try:\n with open(str(config_path.resolve()), \"r\", encoding=\"utf8\") as config_file:\n GLOBAL_CONFIG = unwrap(yaml.safe_load(config_file), {})\n except Exception as exc:\n logger.error(\n \"The YAML config couldn't load because of the following error: \"\n f\"\\n\\n{exc}\"\n \"\\n\\nTabbyAPI will start anyway and not parse this config file.\"\n )\n GLOBAL_CONFIG = {}" }, { "identifier": "get_gen_logging_config", "path": "config.py", "snippet": "def get_gen_logging_config():\n \"\"\"Returns the generation logging config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"logging\"), {})" }, { "identifier": "get_model_config", "path": "config.py", "snippet": "def get_model_config():\n \"\"\"Returns the model config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"model\"), {})" }, { "identifier": "get_draft_model_config", "path": "config.py", "snippet": "def get_draft_model_config():\n \"\"\"Returns the draft model config from the global config\"\"\"\n model_config = unwrap(GLOBAL_CONFIG.get(\"model\"), {})\n return unwrap(model_config.get(\"draft\"), {})" }, { "identifier": "get_lora_config", "path": "config.py", "snippet": "def get_lora_config():\n \"\"\"Returns the lora config from the global config\"\"\"\n model_config = unwrap(GLOBAL_CONFIG.get(\"model\"), {})\n return unwrap(model_config.get(\"lora\"), {})" }, { "identifier": "get_network_config", "path": "config.py", 
"snippet": "def get_network_config():\n \"\"\"Returns the network config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"network\"), {})" }, { "identifier": "call_with_semaphore", "path": "generators.py", "snippet": "async def call_with_semaphore(callback: partialmethod):\n if inspect.iscoroutinefunction(callback):\n return await callback()\n async with generate_semaphore:\n return callback()" }, { "identifier": "generate_with_semaphore", "path": "generators.py", "snippet": "async def generate_with_semaphore(generator: AsyncGenerator):\n \"\"\"Generate with a semaphore.\"\"\"\n async with generate_semaphore:\n if inspect.isasyncgenfunction:\n async for result in generator():\n yield result\n else:\n for result in generator():\n yield result" }, { "identifier": "ModelContainer", "path": "model.py", "snippet": "class ModelContainer:\n \"\"\"The model container class for ExLlamaV2 models.\"\"\"\n\n config: Optional[ExLlamaV2Config] = None\n draft_config: Optional[ExLlamaV2Config] = None\n model: Optional[ExLlamaV2] = None\n draft_model: Optional[ExLlamaV2] = None\n cache: Optional[ExLlamaV2Cache] = None\n draft_cache: Optional[ExLlamaV2Cache] = None\n tokenizer: Optional[ExLlamaV2Tokenizer] = None\n generator: Optional[ExLlamaV2StreamingGenerator] = None\n prompt_template: Optional[PromptTemplate] = None\n\n cache_fp8: bool = False\n gpu_split_auto: bool = True\n gpu_split: Optional[list] = None\n use_cfg: bool = False\n\n active_loras: List[ExLlamaV2Lora] = []\n\n def __init__(self, model_directory: pathlib.Path, quiet=False, **kwargs):\n \"\"\"\n Create model container\n\n Args:\n model_dir (int): Model directory containing config.json,\n tokenizer.model etc.\n quiet (bool): Suppress console output\n load_progress_callback (function, optional): A function to call for\n each module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int,\n loading_draft: bool)\n **kwargs:\n `cache_mode` (str): Sets cache mode, \"FP16\" or \"FP8\"\n (defaulf: \"FP16\")\n 'max_seq_len' (int): Override model's default max sequence\n length (default: 4096)\n 'rope_scale' (float): Set RoPE scaling factor for model\n (default: 1.0)\n 'rope_alpha' (float): Set RoPE alpha (NTK) factor for model\n (default: 1.0)\n 'prompt_template' (str): Manually sets the prompt template for\n this model (default: None)\n 'chunk_size' (int): Sets the maximum chunk size for the model\n (default: 2048)\n Inferencing in chunks reduces overall VRAM overhead by\n processing very long sequences in smaller batches. This\n limits the size of temporary buffers needed for the hidden\n state and attention weights.\n 'draft_model_dir' (str): Draft model directory\n 'draft_rope_scale' (float): Set RoPE scaling factor for draft\n model (default: 1.0)\n 'draft_rope_alpha' (float): RoPE alpha (NTK) factor for draft\n model. By default, the draft model's alpha value is\n calculated automatically to scale to the size of the\n full model.\n 'lora_dir' (str): LoRA directory\n 'loras' (list[dict]): List of loras to be loaded, consisting of\n 'name' and 'scaling'\n 'gpu_split_auto' (bool): Automatically split model across\n available devices (default: True)\n 'gpu_split' (list[float]): Allocation for weights and (some)\n tensors, per device\n 'no_flash_attn' (bool): Turns off flash attention\n (increases vram usage) (default: False)\n 'use_cfg\" (bool): Enables CFG support. 
Disables flash attention\n (default: False)\n \"\"\"\n\n self.quiet = quiet\n\n self.cache_fp8 = \"cache_mode\" in kwargs and kwargs[\"cache_mode\"] == \"FP8\"\n self.gpu_split = kwargs.get(\"gpu_split\")\n self.gpu_split_auto = unwrap(kwargs.get(\"gpu_split_auto\"), True)\n\n self.config = ExLlamaV2Config()\n self.config.model_dir = str(model_directory.resolve())\n\n # Make the max seq len 4096 before preparing the config\n # This is a better default than 2038\n self.config.max_seq_len = 4096\n self.config.prepare()\n\n # Then override the base_seq_len if present\n override_base_seq_len = kwargs.get(\"override_base_seq_len\")\n if override_base_seq_len:\n self.config.max_seq_len = override_base_seq_len\n\n # Grab the base model's sequence length before overrides for\n # rope calculations\n base_seq_len = self.config.max_seq_len\n\n # Set the target seq len if present\n target_max_seq_len = kwargs.get(\"max_seq_len\")\n if target_max_seq_len:\n self.config.max_seq_len = target_max_seq_len\n\n # Set the rope scale\n self.config.scale_pos_emb = unwrap(\n kwargs.get(\"rope_scale\"), self.config.scale_pos_emb\n )\n\n # Automatically calculate rope alpha\n self.config.scale_alpha_value = unwrap(\n kwargs.get(\"rope_alpha\"), self.calculate_rope_alpha(base_seq_len)\n )\n\n if hasattr(ExLlamaV2Sampler.Settings, \"cfg_scale\"):\n self.use_cfg = unwrap(kwargs.get(\"use_cfg\"), False)\n else:\n logger.warning(\n \"CFG is not supported by the currently installed ExLlamaV2 version.\"\n )\n\n # Turn off flash attention if CFG is on\n # Workaround until batched FA2 is fixed in exllamav2 upstream\n self.config.no_flash_attn = (\n True if self.use_cfg else unwrap(kwargs.get(\"no_flash_attention\"), False)\n )\n\n # low_mem is currently broken in exllamav2. Don't use it until it's\n # fixed.\n \"\"\"\n if \"low_mem\" in kwargs and kwargs[\"low_mem\"]:\n self.config.set_low_mem()\n \"\"\"\n\n # Set prompt template override if provided\n prompt_template_name = kwargs.get(\"prompt_template\")\n if prompt_template_name:\n logger.info(\"Loading prompt template with name \" f\"{prompt_template_name}\")\n # Read the template\n self.prompt_template = get_template_from_file(prompt_template_name)\n else:\n # Then try finding the template from the tokenizer_config.json\n self.prompt_template = get_template_from_model_json(\n pathlib.Path(self.config.model_dir) / \"tokenizer_config.json\",\n \"chat_template\",\n \"from_tokenizer_config\",\n )\n\n # Try finding the chat template from the model's config.json\n # TODO: This may not even be used with huggingface models,\n # mark for removal.\n if self.prompt_template is None:\n self.prompt_template = get_template_from_model_json(\n pathlib.Path(self.config.model_config),\n \"chat_template\",\n \"from_model_config\",\n )\n\n # If that fails, attempt fetching from model name\n if self.prompt_template is None:\n template_match = find_template_from_model(model_directory)\n if template_match:\n self.prompt_template = get_template_from_file(template_match)\n\n # Catch all for template lookup errors\n if self.prompt_template:\n logger.info(\n f\"Using template {self.prompt_template.name} \" \"for chat completions.\"\n )\n else:\n logger.warning(\n \"Chat completions are disabled because a prompt \"\n \"template wasn't provided or auto-detected.\"\n )\n\n # Set num of experts per token if provided\n num_experts_override = kwargs.get(\"num_experts_per_token\")\n if num_experts_override:\n if hasattr(self.config, \"num_experts_per_token\"):\n self.config.num_experts_per_token = 
num_experts_override\n else:\n logger.warning(\n \"MoE experts per token override is not \"\n \"supported by the current ExLlamaV2 version.\"\n )\n\n chunk_size = min(\n unwrap(kwargs.get(\"chunk_size\"), 2048), self.config.max_seq_len\n )\n self.config.max_input_len = chunk_size\n self.config.max_attn_size = chunk_size**2\n\n draft_args = unwrap(kwargs.get(\"draft\"), {})\n draft_model_name = draft_args.get(\"draft_model_name\")\n enable_draft = draft_args and draft_model_name\n\n # Always disable draft if params are incorrectly configured\n if draft_args and draft_model_name is None:\n logger.warning(\n \"Draft model is disabled because a model name \"\n \"wasn't provided. Please check your config.yml!\"\n )\n enable_draft = False\n\n if enable_draft:\n self.draft_config = ExLlamaV2Config()\n draft_model_path = pathlib.Path(\n unwrap(draft_args.get(\"draft_model_dir\"), \"models\")\n )\n draft_model_path = draft_model_path / draft_model_name\n\n self.draft_config.model_dir = str(draft_model_path.resolve())\n self.draft_config.prepare()\n\n self.draft_config.scale_pos_emb = unwrap(\n draft_args.get(\"draft_rope_scale\"), 1.0\n )\n\n # Automatically calculate draft rope alpha\n self.draft_config.scale_alpha_value = unwrap(\n draft_args.get(\"draft_rope_alpha\"),\n self.calculate_rope_alpha(self.draft_config.max_seq_len),\n )\n self.draft_config.max_seq_len = self.config.max_seq_len\n\n if \"chunk_size\" in kwargs:\n self.draft_config.max_input_len = kwargs[\"chunk_size\"]\n self.draft_config.max_attn_size = kwargs[\"chunk_size\"] ** 2\n\n def calculate_rope_alpha(self, base_seq_len):\n \"\"\"Calculate the rope alpha value for a given sequence length.\"\"\"\n ratio = self.config.max_seq_len / base_seq_len\n\n # Default to a 1 alpha if the sequence length is ever less\n # than or equal to 1\n if ratio <= 1.0:\n alpha = 1\n else:\n alpha = -0.13436 + 0.80541 * ratio + 0.28833 * ratio**2\n return alpha\n\n def get_model_path(self, is_draft: bool = False):\n \"\"\"Get the path for this model.\"\"\"\n model_path = pathlib.Path(\n self.draft_config.model_dir if is_draft else self.config.model_dir\n )\n return model_path\n\n def load(self, progress_callback=None):\n \"\"\"\n Load model\n\n Args:\n progress_callback (function, optional): A function to call for each\n module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int)\n \"\"\"\n for _ in self.load_gen(progress_callback):\n pass\n\n def load_loras(self, lora_directory: pathlib.Path, **kwargs):\n \"\"\"\n Load loras\n \"\"\"\n\n loras = unwrap(kwargs.get(\"loras\"), [])\n success: List[str] = []\n failure: List[str] = []\n\n for lora in loras:\n lora_name = lora.get(\"name\")\n lora_scaling = unwrap(lora.get(\"scaling\"), 1.0)\n\n if lora_name is None:\n logger.warning(\n \"One of your loras does not have a name. Please check your \"\n \"config.yml! 
Skipping lora load.\"\n )\n failure.append(lora_name)\n continue\n\n logger.info(f\"Loading lora: {lora_name} at scaling {lora_scaling}\")\n lora_path = lora_directory / lora_name\n # FIXME(alpin): Does self.model need to be passed here?\n self.active_loras.append(\n ExLlamaV2Lora.from_directory(self.model, lora_path, lora_scaling)\n )\n logger.info(f\"Lora successfully loaded: {lora_name}\")\n success.append(lora_name)\n\n # Return success and failure names\n return {\"success\": success, \"failure\": failure}\n\n def load_gen(self, progress_callback=None):\n \"\"\"\n Load model, generator function\n\n Args:\n progress_callback (function, optional): A function to call for each\n module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int)\n \"\"\"\n\n # Load tokenizer\n self.tokenizer = ExLlamaV2Tokenizer(self.config)\n\n # Load draft model if a config is present\n if self.draft_config:\n self.draft_model = ExLlamaV2(self.draft_config)\n if not self.quiet:\n logger.info(\"Loading draft model: \" + self.draft_config.model_dir)\n\n self.draft_cache = ExLlamaV2Cache(self.draft_model, lazy=True)\n reserve = [AUTO_SPLIT_RESERVE_BYTES] + [0] * 16\n yield from self.draft_model.load_autosplit_gen(\n self.draft_cache,\n reserve_vram=reserve,\n last_id_only=True,\n callback_gen=progress_callback,\n )\n\n # Test VRAM allocation with a full-length forward pass\n input_ids = torch.zeros((1, self.config.max_input_len), dtype=torch.long)\n self.draft_model.forward(input_ids, cache=self.cache, preprocess_only=True)\n\n # Load model\n self.model = ExLlamaV2(self.config)\n if not self.quiet:\n logger.info(\"Loading model: \" + self.config.model_dir)\n\n if not self.gpu_split_auto:\n for value in self.model.load_gen(\n self.gpu_split, callback_gen=progress_callback\n ):\n if isinstance(value, str):\n yield value\n\n batch_size = 2 if self.use_cfg else 1\n if self.cache_fp8:\n self.cache = ExLlamaV2Cache_8bit(\n self.model, lazy=self.gpu_split_auto, batch_size=batch_size\n )\n else:\n self.cache = ExLlamaV2Cache(\n self.model, lazy=self.gpu_split_auto, batch_size=batch_size\n )\n\n if self.gpu_split_auto:\n reserve = [AUTO_SPLIT_RESERVE_BYTES] + [0] * 16\n yield from self.model.load_autosplit_gen(\n self.cache,\n reserve_vram=reserve,\n last_id_only=True,\n callback_gen=progress_callback,\n )\n\n # Test VRAM allocation with a full-length forward pass\n input_ids = torch.zeros((1, self.config.max_input_len), dtype=torch.long)\n self.model.forward(input_ids, cache=self.cache, preprocess_only=True)\n\n # Create generator\n self.generator = ExLlamaV2StreamingGenerator(\n self.model,\n self.cache,\n self.tokenizer,\n self.draft_model,\n self.draft_cache,\n )\n\n logger.info(\"Model successfully loaded.\")\n\n def unload(self, loras_only: bool = False):\n \"\"\"\n Free all VRAM resources used by this model\n \"\"\"\n\n for lora in self.active_loras:\n lora.unload()\n\n self.active_loras = []\n\n # Unload the entire model if not just unloading loras\n if not loras_only:\n if self.model:\n self.model.unload()\n self.model = None\n\n if self.draft_model:\n self.draft_model.unload()\n self.draft_model = None\n\n self.config = None\n self.cache = None\n self.tokenizer = None\n self.generator = None\n\n gc.collect()\n torch.cuda.empty_cache()\n\n def get_tokens(self, text: Optional[str], ids: Optional[List[int]], **kwargs):\n \"\"\"Common function for token operations\"\"\"\n if text:\n # Assume token encoding\n return self.tokenizer.encode(\n text,\n add_bos=unwrap(kwargs.get(\"add_bos_token\"), 
True),\n encode_special_tokens=unwrap(kwargs.get(\"encode_special_tokens\"), True),\n )\n if ids:\n # Assume token decoding\n ids = torch.tensor([ids])\n return self.tokenizer.decode(\n ids,\n decode_special_tokens=unwrap(kwargs.get(\"decode_special_tokens\"), True),\n )[0]\n\n return None\n\n def get_special_tokens(self, add_bos_token: bool, ban_eos_token: bool):\n return {\n \"bos_token\": self.tokenizer.bos_token if add_bos_token else \"\",\n \"eos_token\": self.tokenizer.eos_token if not ban_eos_token else \"\",\n \"pad_token\": self.tokenizer.pad_token,\n \"unk_token\": self.tokenizer.unk_token,\n }\n\n def check_unsupported_settings(self, **kwargs):\n # Warn of unsupported settings if the setting is enabled\n if (unwrap(kwargs.get(\"mirostat\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"mirostat\"\n ):\n logger.warning(\n \"Mirostat sampling is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"min_p\"), 0.0)) not in [0.0, 1.0] and not hasattr(\n ExLlamaV2Sampler.Settings, \"min_p\"\n ):\n logger.warning(\n \"Min-P sampling is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"tfs\"), 0.0)) not in [0.0, 1.0] and not hasattr(\n ExLlamaV2Sampler.Settings, \"tfs\"\n ):\n logger.warning(\n \"Tail-free sampling (TFS) is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"temperature_last\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"temperature_last\"\n ):\n logger.warning(\n \"Temperature last is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"top_a\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"top_a\"\n ):\n logger.warning(\n \"Top-A is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"presence_penalty\"), 0.0)) != 0.0 and not hasattr(\n ExLlamaV2Sampler.Settings, \"token_presence_penalty\"\n ):\n logger.warning(\n \"Presence penalty is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n def generate(self, prompt: str, **kwargs):\n \"\"\"Generate a response to a prompt\"\"\"\n generation = list(self.generate_gen(prompt, **kwargs))\n if generation:\n response = \"\".join(map(lambda chunk: chunk[0], generation))\n return response, generation[-1][1], generation[-1][2]\n\n return \"\", 0, 0\n\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n def generate_gen(self, prompt: str, **kwargs):\n \"\"\"\n Create generator function for prompt completion\n\n Args:\n prompt (str): Input prompt\n **kwargs:\n 'token_healing' (bool): Use token healing (default: False)\n 'temperature' (float): Sampling temperature (default: 1.0)\n 'temperature_last' (bool): Apply temperature after all other\n samplers (default: False)\n 'top_k' (int): Sampling top-K (default: 0)\n 'top_p' (float): Sampling top-P (default: 1.0)\n 'min_p' (float): Sampling min-P (default: 0.0)\n 'tfs' (float): Tail-free sampling (default: 0.0)\n 'typical' (float): Sampling typical (default: 0.0)\n 'mirostat' (bool): Use Mirostat (default: False)\n 'mirostat_tau' (float) Mirostat tau parameter (default: 1.5)\n 'mirostat_eta' (float) Mirostat eta parameter (default: 0.1)\n 'frequency_penalty' (float): Token frequency penalty (default: 0.0)\n 'presence_penalty' (float): Token presence penalty (default: 0.0)\n 'repetition_penalty' (float): Token repetition penalty\n (default: 1.15)\n 'penalty_range' 
(int): Penalty range\n (default: whole context)\n 'repetition_decay' (int): Repetition penalty range\n (default: same as range)\n 'stop' (List[Union[str, int]]): List of stop strings/tokens to\n end response (default: [EOS])\n 'max_tokens' (int): Max no. tokens in response (default: 150)\n 'add_bos_token' (bool): Adds the BOS token to the start of the\n prompt (default: True)\n 'ban_eos_token' (bool): Bans the EOS token from generation\n (default: False)\n 'logit_bias' (Dict[int, float]): Biases specific tokens to\n either show up more or less (default: None)\n 'stream_interval' (float): Interval in seconds between each\n output chunk (default: immediate)\n 'generate_window' (int): Space to reserve at the end of the\n model's context when generating. Rolls context window by\n the same amount if context length is exceeded to allow\n generating pastthe models max_seq_len.\n \"\"\"\n\n token_healing = unwrap(kwargs.get(\"token_healing\"), False)\n max_tokens = unwrap(kwargs.get(\"max_tokens\"), 150)\n stream_interval = unwrap(kwargs.get(\"stream_interval\"), 0)\n generate_window = min(unwrap(kwargs.get(\"generate_window\"), 512), max_tokens)\n\n # Sampler settings\n gen_settings = ExLlamaV2Sampler.Settings()\n\n self.check_unsupported_settings(**kwargs)\n\n # Apply settings\n gen_settings.temperature = unwrap(kwargs.get(\"temperature\"), 1.0)\n gen_settings.temperature_last = unwrap(kwargs.get(\"temperature_last\"), False)\n gen_settings.top_k = unwrap(kwargs.get(\"top_k\"), 0)\n gen_settings.top_p = unwrap(kwargs.get(\"top_p\"), 1.0)\n gen_settings.top_a = unwrap(kwargs.get(\"top_a\"), 0.0)\n gen_settings.min_p = unwrap(kwargs.get(\"min_p\"), 0.0)\n gen_settings.tfs = unwrap(kwargs.get(\"tfs\"), 1.0)\n gen_settings.typical = unwrap(kwargs.get(\"typical\"), 1.0)\n gen_settings.mirostat = unwrap(kwargs.get(\"mirostat\"), False)\n\n # Default tau and eta fallbacks don't matter if mirostat is off\n gen_settings.mirostat_tau = unwrap(kwargs.get(\"mirostat_tau\"), 1.5)\n gen_settings.mirostat_eta = unwrap(kwargs.get(\"mirostat_eta\"), 0.1)\n\n # Set CFG scale and negative prompt\n cfg_scale = unwrap(kwargs.get(\"cfg_scale\"), 1.0)\n negative_prompt = None\n if cfg_scale not in [None, 1.0]:\n if self.use_cfg:\n gen_settings.cfg_scale = cfg_scale\n\n # If the negative prompt is empty, use the BOS token\n negative_prompt = unwrap(\n kwargs.get(\"negative_prompt\"), self.tokenizer.bos_token\n )\n else:\n logger.warn(\n \"CFG is currently disabled. 
\"\n + \"Please reload your model with use_cfg = True.\",\n )\n\n gen_settings.token_presence_penalty = unwrap(\n kwargs.get(\"presence_penalty\"), 0.0\n )\n gen_settings.token_repetition_penalty = unwrap(\n kwargs.get(\"repetition_penalty\"), 1.0\n )\n\n # Applies for all penalties despite being called token_repetition_range\n gen_settings.token_repetition_range = unwrap(\n kwargs.get(\"penalty_range\"), self.config.max_seq_len\n )\n auto_scale_penalty_range = False\n\n frequency_penalty = unwrap(kwargs.get(\"frequency_penalty\"), 0.0)\n if hasattr(gen_settings, \"token_frequency_penalty\"):\n gen_settings.token_frequency_penalty = frequency_penalty\n\n # Dynamically scale penalty range to output tokens\n # Only do this if freq/pres pen is enabled\n # and the repetition range is -1\n auto_scale_penalty_range = (\n gen_settings.token_frequency_penalty != 0\n or gen_settings.token_presence_penalty != 0\n ) and gen_settings.token_repetition_range == -1\n elif frequency_penalty != 0.0:\n logger.warning(\n \"Frequency penalty is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n # Override the repetition penalty value if it isn't set already\n # if the user is on an older exl2 version\n if unwrap(gen_settings.token_repetition_penalty, 1.0) == 1.0:\n gen_settings.token_repetition_penalty = frequency_penalty\n logger.warning(\"Setting this value to repetition penalty instead.\")\n\n # Always make sure the fallback is 0 if range < 0\n # It's technically fine to use -1, but this just validates the passed\n # fallback\n # Always default to 0 if something goes wrong\n if gen_settings.token_repetition_range < 0:\n fallback_decay = 0\n else:\n fallback_decay = gen_settings.token_repetition_range\n gen_settings.token_repetition_decay = coalesce(\n kwargs.get(\"repetition_decay\"), fallback_decay, 0\n )\n\n stop_conditions: List[Union[str, int]] = unwrap(kwargs.get(\"stop\"), [])\n add_bos_token = unwrap(kwargs.get(\"add_bos_token\"), True)\n ban_eos_token = unwrap(kwargs.get(\"ban_eos_token\"), False)\n logit_bias = kwargs.get(\"logit_bias\")\n\n # Override sampler settings for temp = 0\n if gen_settings.temperature == 0:\n gen_settings.temperature = 1.0\n gen_settings.top_k = 1\n gen_settings.top_p = 0\n gen_settings.typical = 0\n\n # Log generation options to console\n # Some options are too large, so log the args instead\n log_generation_params(\n max_tokens=max_tokens,\n **vars(gen_settings),\n token_healing=token_healing,\n auto_scale_penalty_range=auto_scale_penalty_range,\n add_bos_token=add_bos_token,\n ban_eos_token=ban_eos_token,\n stop_conditions=stop_conditions,\n logit_bias=logit_bias,\n )\n\n # Log prompt to console\n log_prompt(prompt, negative_prompt)\n\n # Set logit bias\n if logit_bias:\n # Create a vocab tensor if it doesn't exist for token biasing\n if gen_settings.token_bias is None:\n padding = -self.tokenizer.config.vocab_size % 32\n gen_settings.token_bias = torch.zeros(\n (self.tokenizer.config.vocab_size + padding,),\n dtype=torch.float,\n )\n\n # Map logits to the tensor with their biases\n for token, bias in logit_bias.items():\n gen_settings.token_bias[token] = bias\n\n # Ban the EOS token if specified. 
If not, append to stop conditions\n # as well.\n # Set this below logging to avoid polluting the stop strings array\n if ban_eos_token:\n gen_settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])\n else:\n stop_conditions.append(self.tokenizer.eos_token_id)\n\n # Stop conditions\n self.generator.set_stop_conditions(stop_conditions)\n\n # Tokenized context\n ids, offsets = self.tokenizer.encode(\n [prompt, negative_prompt]\n if negative_prompt and gen_settings.cfg_scale not in [None, 1.0]\n else prompt,\n add_bos=add_bos_token,\n encode_special_tokens=True,\n return_offsets=True,\n )\n mask = (\n self.tokenizer.padding_mask(ids)\n if self.use_cfg and gen_settings.cfg_scale not in [None, 1.0]\n else None\n )\n context_len = len(ids[0])\n\n if context_len > self.config.max_seq_len:\n logger.warning(\n f\"Context length {context_len} is greater than max_seq_len \"\n f\"{self.config.max_seq_len}. Generation is truncated and \"\n \"metrics may not be accurate.\"\n )\n\n prompt_tokens = ids.shape[-1]\n\n # Begin\n generated_tokens = 0\n full_response = \"\"\n start_time = time.time()\n last_chunk_time = start_time\n\n save_tokens = torch.empty((ids.shape[0], 0), dtype=torch.bool)\n chunk_buffer = \"\"\n chunk_tokens = 0\n\n while True:\n # Ingest prompt\n if chunk_tokens == 0:\n ids = torch.cat((ids, save_tokens), dim=-1)\n save_tokens = torch.empty((ids.shape[0], 0), dtype=torch.bool)\n overflow = ids.shape[-1] + generate_window - self.config.max_seq_len\n active_ids = ids[:, max(0, overflow) :]\n chunk_tokens = self.config.max_seq_len - active_ids.shape[-1]\n\n # Split for exllama versions that have CFG\n if self.use_cfg:\n self.generator.begin_stream(\n active_ids,\n gen_settings,\n token_healing=token_healing,\n loras=self.active_loras,\n input_mask=mask,\n position_offsets=offsets,\n )\n else:\n self.generator.begin_stream(\n active_ids,\n gen_settings,\n token_healing=token_healing,\n loras=self.active_loras,\n )\n\n # Reset offsets for subsequent passes if the context is truncated\n offsets = None\n\n if auto_scale_penalty_range:\n gen_settings.token_repetition_range = generated_tokens\n\n # Generate\n chunk, eos, tokens = self.generator.stream()\n\n if token_healing:\n # Extract healed token\n ids[:, -1] = self.generator.sequence_ids[:, -2]\n token_healing = False\n\n save_tokens = torch.cat(\n (save_tokens, tokens.expand(save_tokens.shape[0], -1)), dim=-1\n )\n chunk_buffer += chunk\n\n generated_tokens += 1\n chunk_tokens -= 1\n\n # Yield output\n now = time.time()\n elapsed = now - last_chunk_time\n\n if chunk_buffer != \"\" and (\n elapsed > stream_interval or eos or generated_tokens == max_tokens\n ):\n yield chunk_buffer, prompt_tokens, generated_tokens\n full_response += chunk_buffer\n chunk_buffer = \"\"\n last_chunk_time = now\n\n if eos or generated_tokens == max_tokens:\n break\n\n # Print response\n log_response(full_response)\n\n elapsed_time = last_chunk_time - start_time\n\n initial_response = (\n f\"Metrics: {generated_tokens} tokens generated in \"\n f\"{round(elapsed_time, 2)} seconds\"\n )\n itemization = []\n extra_parts = []\n\n # Add tokens per second\n tokens_per_second = (\n \"Indeterminate\"\n if elapsed_time == 0\n else round(generated_tokens / elapsed_time, 2)\n )\n itemization.append(f\"{tokens_per_second} T/s\")\n\n # Add context (original token count)\n if ids is not None:\n itemization.append(f\"context {context_len} tokens\")\n\n if context_len > self.config.max_seq_len:\n extra_parts.append(\"<-- Not accurate (truncated)\")\n\n # Print 
output\n logger.info(\n initial_response\n + \" (\"\n + \", \".join(itemization)\n + \") \"\n + \" \".join(extra_parts)\n )" }, { "identifier": "CompletionRequest", "path": "OAI/types/completion.py", "snippet": "class CompletionRequest(CommonCompletionRequest):\n \"\"\"Represents a completion request.\"\"\"\n\n # Prompt can also contain token ids, but that's out of scope\n # for this project.\n prompt: Union[str, List[str]]" }, { "identifier": "ChatCompletionRequest", "path": "OAI/types/chat_completion.py", "snippet": "class ChatCompletionRequest(CommonCompletionRequest):\n # Messages\n # Take in a string as well even though it's not part of the OAI spec\n messages: Union[str, List[Dict[str, str]]]\n prompt_template: Optional[str] = None\n add_generation_prompt: Optional[bool] = True" }, { "identifier": "LoraCard", "path": "OAI/types/lora.py", "snippet": "class LoraCard(BaseModel):\n \"\"\"Represents a single Lora card.\"\"\"\n\n id: str = \"test\"\n object: str = \"lora\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n scaling: Optional[float] = None" }, { "identifier": "LoraList", "path": "OAI/types/lora.py", "snippet": "class LoraList(BaseModel):\n \"\"\"Represents a list of Lora cards.\"\"\"\n\n object: str = \"list\"\n data: List[LoraCard] = Field(default_factory=list)" }, { "identifier": "LoraLoadRequest", "path": "OAI/types/lora.py", "snippet": "class LoraLoadRequest(BaseModel):\n \"\"\"Represents a Lora load request.\"\"\"\n\n loras: List[LoraLoadInfo]" }, { "identifier": "LoraLoadResponse", "path": "OAI/types/lora.py", "snippet": "class LoraLoadResponse(BaseModel):\n \"\"\"Represents a Lora load response.\"\"\"\n\n success: List[str] = Field(default_factory=list)\n failure: List[str] = Field(default_factory=list)" }, { "identifier": "ModelCard", "path": "OAI/types/model.py", "snippet": "class ModelCard(BaseModel):\n \"\"\"Represents a single model card.\"\"\"\n\n id: str = \"test\"\n object: str = \"model\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n logging: Optional[LogPreferences] = None\n parameters: Optional[ModelCardParameters] = None" }, { "identifier": "ModelLoadRequest", "path": "OAI/types/model.py", "snippet": "class ModelLoadRequest(BaseModel):\n \"\"\"Represents a model load request.\"\"\"\n\n name: str\n\n # Max seq len is fetched from config.json of the model by default\n max_seq_len: Optional[int] = Field(\n description=\"Leave this blank to use the model's base sequence length\",\n default=None,\n examples=[4096],\n )\n override_base_seq_len: Optional[int] = Field(\n description=(\n \"Overrides the model's base sequence length. 
\" \"Leave blank if unsure\"\n ),\n default=None,\n examples=[4096],\n )\n gpu_split_auto: Optional[bool] = True\n gpu_split: Optional[List[float]] = Field(\n default_factory=list, examples=[[24.0, 20.0]]\n )\n rope_scale: Optional[float] = Field(\n description=\"Automatically pulled from the model's config if not present\",\n default=None,\n examples=[1.0],\n )\n rope_alpha: Optional[float] = Field(\n description=\"Automatically calculated if not present\",\n default=None,\n examples=[1.0],\n )\n no_flash_attention: Optional[bool] = False\n # low_mem: Optional[bool] = False\n cache_mode: Optional[str] = \"FP16\"\n prompt_template: Optional[str] = None\n num_experts_per_token: Optional[int] = None\n use_cfg: Optional[bool] = None\n draft: Optional[DraftModelLoadRequest] = None" }, { "identifier": "ModelLoadResponse", "path": "OAI/types/model.py", "snippet": "class ModelLoadResponse(BaseModel):\n \"\"\"Represents a model load response.\"\"\"\n\n # Avoids pydantic namespace warning\n model_config = ConfigDict(protected_namespaces=[])\n\n model_type: str = \"model\"\n module: int\n modules: int\n status: str" }, { "identifier": "ModelCardParameters", "path": "OAI/types/model.py", "snippet": "class ModelCardParameters(BaseModel):\n \"\"\"Represents model card parameters.\"\"\"\n\n # Safe to do this since it's guaranteed to fetch a max seq len\n # from model_container\n max_seq_len: Optional[int] = None\n rope_scale: Optional[float] = 1.0\n rope_alpha: Optional[float] = 1.0\n cache_mode: Optional[str] = \"FP16\"\n prompt_template: Optional[str] = None\n num_experts_per_token: Optional[int] = None\n use_cfg: Optional[bool] = None\n draft: Optional[\"ModelCard\"] = None" }, { "identifier": "TemplateList", "path": "OAI/types/template.py", "snippet": "class TemplateList(BaseModel):\n \"\"\"Represents a list of templates.\"\"\"\n\n object: str = \"list\"\n data: List[str] = Field(default_factory=list)" }, { "identifier": "TokenEncodeRequest", "path": "OAI/types/token.py", "snippet": "class TokenEncodeRequest(CommonTokenRequest):\n \"\"\"Represents a tokenization request.\"\"\"\n\n text: str" }, { "identifier": "TokenEncodeResponse", "path": "OAI/types/token.py", "snippet": "class TokenEncodeResponse(BaseModel):\n \"\"\"Represents a tokenization response.\"\"\"\n\n tokens: List[int]\n length: int" }, { "identifier": "TokenDecodeRequest", "path": "OAI/types/token.py", "snippet": "class TokenDecodeRequest(CommonTokenRequest):\n \"\"\" \" Represents a detokenization request.\"\"\"\n\n tokens: List[int]" }, { "identifier": "TokenDecodeResponse", "path": "OAI/types/token.py", "snippet": "class TokenDecodeResponse(BaseModel):\n \"\"\"Represents a detokenization response.\"\"\"\n\n text: str" }, { "identifier": "create_completion_response", "path": "OAI/utils_oai.py", "snippet": "def create_completion_response(\n text: str,\n prompt_tokens: int,\n completion_tokens: int,\n model_name: Optional[str],\n):\n \"\"\"Create a completion response from the provided text.\"\"\"\n choice = CompletionRespChoice(finish_reason=\"Generated\", text=text)\n\n response = CompletionResponse(\n choices=[choice],\n model=unwrap(model_name, \"\"),\n usage=UsageStats(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n ),\n )\n\n return response" }, { "identifier": "get_model_list", "path": "OAI/utils_oai.py", "snippet": "def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):\n \"\"\"Get the list of models from the provided 
path.\"\"\"\n\n # Convert the provided draft model path to a pathlib path for\n # equality comparisons\n if draft_model_path:\n draft_model_path = pathlib.Path(draft_model_path).resolve()\n\n model_card_list = ModelList()\n for path in model_path.iterdir():\n # Don't include the draft models path\n if path.is_dir() and path != draft_model_path:\n model_card = ModelCard(id=path.name)\n model_card_list.data.append(model_card) # pylint: disable=no-member\n\n return model_card_list" }, { "identifier": "get_lora_list", "path": "OAI/utils_oai.py", "snippet": "def get_lora_list(lora_path: pathlib.Path):\n \"\"\"Get the list of Lora cards from the provided path.\"\"\"\n lora_list = LoraList()\n for path in lora_path.iterdir():\n if path.is_dir():\n lora_card = LoraCard(id=path.name)\n lora_list.data.append(lora_card) # pylint: disable=no-member\n\n return lora_list" }, { "identifier": "create_chat_completion_response", "path": "OAI/utils_oai.py", "snippet": "def create_chat_completion_response(\n text: str,\n prompt_tokens: int,\n completion_tokens: int,\n model_name: Optional[str],\n):\n \"\"\"Create a chat completion response from the provided text.\"\"\"\n message = ChatCompletionMessage(role=\"assistant\", content=text)\n\n choice = ChatCompletionRespChoice(finish_reason=\"Generated\", message=message)\n\n response = ChatCompletionResponse(\n choices=[choice],\n model=unwrap(model_name, \"\"),\n usage=UsageStats(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n ),\n )\n\n return response" }, { "identifier": "create_chat_completion_stream_chunk", "path": "OAI/utils_oai.py", "snippet": "def create_chat_completion_stream_chunk(\n const_id: str,\n text: Optional[str] = None,\n model_name: Optional[str] = None,\n finish_reason: Optional[str] = None,\n):\n \"\"\"Create a chat completion stream chunk from the provided text.\"\"\"\n if finish_reason:\n message = {}\n else:\n message = ChatCompletionMessage(role=\"assistant\", content=text)\n\n # The finish reason can be None\n choice = ChatCompletionStreamChoice(finish_reason=finish_reason, delta=message)\n\n chunk = ChatCompletionStreamChunk(\n id=const_id, choices=[choice], model=unwrap(model_name, \"\")\n )\n\n return chunk" }, { "identifier": "get_all_templates", "path": "templating.py", "snippet": "def get_all_templates():\n \"\"\"Fetches all templates from the templates directory\"\"\"\n\n template_directory = pathlib.Path(\"templates\")\n return template_directory.glob(\"*.jinja\")" }, { "identifier": "get_prompt_from_template", "path": "templating.py", "snippet": "def get_prompt_from_template(\n messages,\n prompt_template: PromptTemplate,\n add_generation_prompt: bool,\n special_tokens: Optional[Dict[str, str]] = None,\n):\n \"\"\"Get a prompt from a template and a list of messages.\"\"\"\n if version.parse(package_version(\"jinja2\")) < version.parse(\"3.0.0\"):\n raise ImportError(\n \"Parsing these chat completion messages requires jinja2 3.0.0 \"\n f\"or greater. 
Current version: {package_version('jinja2')}\\n\"\n \"Please upgrade jinja by running the following command: \"\n \"pip install --upgrade jinja2\"\n )\n\n compiled_template = _compile_template(prompt_template.template)\n return compiled_template.render(\n messages=messages,\n add_generation_prompt=add_generation_prompt,\n **special_tokens,\n )" }, { "identifier": "get_generator_error", "path": "utils.py", "snippet": "def get_generator_error(message: str):\n \"\"\"Get a generator error.\"\"\"\n error_message = TabbyGeneratorErrorMessage(\n message=message, trace=traceback.format_exc()\n )\n\n generator_error = TabbyGeneratorError(error=error_message)\n\n # Log and send the exception\n logger.error(generator_error.error.message)\n return get_sse_packet(generator_error.model_dump_json())" }, { "identifier": "get_sse_packet", "path": "utils.py", "snippet": "def get_sse_packet(json_data: str):\n \"\"\"Get an SSE packet.\"\"\"\n return f\"data: {json_data}\\n\\n\"" }, { "identifier": "load_progress", "path": "utils.py", "snippet": "def load_progress(module, modules):\n \"\"\"Wrapper callback for load progress.\"\"\"\n yield module, modules" }, { "identifier": "unwrap", "path": "utils.py", "snippet": "def unwrap(wrapped, default=None):\n \"\"\"Unwrap function for Optionals.\"\"\"\n if wrapped is None:\n return default\n\n return wrapped" }, { "identifier": "init_logger", "path": "logger.py", "snippet": "def init_logger(name: str):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(_default_handler)\n logger.propagate = False\n return logger" } ]
import pathlib import uvicorn import gen_logging from asyncio import CancelledError from typing import Optional from uuid import uuid4 from jinja2 import TemplateError from fastapi import FastAPI, Depends, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse from functools import partial from progress.bar import IncrementalBar from args import convert_args_to_dict, init_argparser from auth import check_admin_key, check_api_key, load_auth_keys from config import ( override_config_from_args, read_config_from_file, get_gen_logging_config, get_model_config, get_draft_model_config, get_lora_config, get_network_config, ) from generators import call_with_semaphore, generate_with_semaphore from model import ModelContainer from OAI.types.completion import CompletionRequest from OAI.types.chat_completion import ChatCompletionRequest from OAI.types.lora import LoraCard, LoraList, LoraLoadRequest, LoraLoadResponse from OAI.types.model import ( ModelCard, ModelLoadRequest, ModelLoadResponse, ModelCardParameters, ) from OAI.types.template import TemplateList from OAI.types.token import ( TokenEncodeRequest, TokenEncodeResponse, TokenDecodeRequest, TokenDecodeResponse, ) from OAI.utils_oai import ( create_completion_response, get_model_list, get_lora_list, create_chat_completion_response, create_chat_completion_stream_chunk, ) from templating import get_all_templates, get_prompt_from_template from utils import get_generator_error, get_sse_packet, load_progress, unwrap from logger import init_logger
14159
yield get_sse_packet("[DONE]") except CancelledError: logger.error("Completion request cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, data.prompt, **data.to_gen_params()) ) response = create_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response # Chat completions endpoint @app.post( "/v1/chat/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_chat_completion(request: Request, data: ChatCompletionRequest): """Generates a chat completion from a prompt.""" if MODEL_CONTAINER.prompt_template is None: raise HTTPException( 422, "This endpoint is disabled because a prompt template is not set.", ) model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.messages, str): prompt = data.messages else: try: special_tokens_dict = MODEL_CONTAINER.get_special_tokens( unwrap(data.add_bos_token, True), unwrap(data.ban_eos_token, False), ) prompt = get_prompt_from_template( data.messages, MODEL_CONTAINER.prompt_template, data.add_generation_prompt, special_tokens_dict, ) except KeyError as exc: raise HTTPException( 400, "Could not find a Conversation from prompt template " f"'{MODEL_CONTAINER.prompt_template.name}'. " "Check your spelling?", ) from exc except TemplateError as exc: raise HTTPException( 400, f"TemplateError: {str(exc)}", ) from exc if data.stream: const_id = f"chatcmpl-{uuid4().hex}" async def generator(): """Generator for the generation process.""" try: new_generation = MODEL_CONTAINER.generate_gen( prompt, **data.to_gen_params() ) for part, _, _ in new_generation: if await request.is_disconnected(): break response = create_chat_completion_stream_chunk( const_id, part, model_path.name ) yield get_sse_packet(response.model_dump_json()) # Yield a finish response on successful generation finish_response = create_chat_completion_stream_chunk( const_id, finish_reason="stop" ) yield get_sse_packet(finish_response.model_dump_json()) except CancelledError: logger.error("Chat completion cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, prompt, **data.to_gen_params()) ) response = create_chat_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response def entrypoint(args: Optional[dict] = None): """Entry function for program startup""" global MODEL_CONTAINER # Load from YAML config read_config_from_file(pathlib.Path("config.yml")) # Parse and override config from args if args is None: parser = init_argparser() args = convert_args_to_dict(parser.parse_args(), parser)
"""The main tabbyAPI module. Contains the FastAPI server and endpoints.""" logger = init_logger(__name__) app = FastAPI( title="TabbyAPI", summary="An OAI compatible exllamav2 API that's both lightweight and fast", description=( "This docs page is not meant to send requests! Please use a service " "like Postman or a frontend UI." ), ) # Globally scoped variables. Undefined until initalized in main MODEL_CONTAINER: Optional[ModelContainer] = None def _check_model_container(): if MODEL_CONTAINER is None or MODEL_CONTAINER.model is None: raise HTTPException(400, "No models are loaded.") # ALlow CORS requests app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Model list endpoint @app.get("/v1/models", dependencies=[Depends(check_api_key)]) @app.get("/v1/model/list", dependencies=[Depends(check_api_key)]) async def list_models(): """Lists all models in the model directory.""" model_config = get_model_config() model_dir = unwrap(model_config.get("model_dir"), "models") model_path = pathlib.Path(model_dir) draft_model_dir = get_draft_model_config().get("draft_model_dir") models = get_model_list(model_path.resolve(), draft_model_dir) if unwrap(model_config.get("use_dummy_models"), False): models.data.insert(0, ModelCard(id="gpt-3.5-turbo")) return models # Currently loaded model endpoint @app.get( "/v1/model", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) @app.get( "/v1/internal/model/info", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_current_model(): """Returns the currently loaded model.""" model_name = MODEL_CONTAINER.get_model_path().name prompt_template = MODEL_CONTAINER.prompt_template model_card = ModelCard( id=model_name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.config.max_seq_len, cache_mode="FP8" if MODEL_CONTAINER.cache_fp8 else "FP16", prompt_template=prompt_template.name if prompt_template else None, num_experts_per_token=MODEL_CONTAINER.config.num_experts_per_token, use_cfg=MODEL_CONTAINER.use_cfg, ), logging=gen_logging.PREFERENCES, ) if MODEL_CONTAINER.draft_config: draft_card = ModelCard( id=MODEL_CONTAINER.get_model_path(True).name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.draft_config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.draft_config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.draft_config.max_seq_len, ), ) model_card.parameters.draft = draft_card return model_card @app.get("/v1/model/draft/list", dependencies=[Depends(check_api_key)]) async def list_draft_models(): """Lists all draft models in the model directory.""" draft_model_dir = unwrap(get_draft_model_config().get("draft_model_dir"), "models") draft_model_path = pathlib.Path(draft_model_dir) models = get_model_list(draft_model_path.resolve()) return models # Load model endpoint @app.post("/v1/model/load", dependencies=[Depends(check_admin_key)]) async def load_model(request: Request, data: ModelLoadRequest): """Loads a model into the model container.""" global MODEL_CONTAINER if MODEL_CONTAINER and MODEL_CONTAINER.model: raise HTTPException(400, "A model is already loaded! 
Please unload it first.") if not data.name: raise HTTPException(400, "model_name not found.") model_path = pathlib.Path(unwrap(get_model_config().get("model_dir"), "models")) model_path = model_path / data.name load_data = data.model_dump() if data.draft: if not data.draft.draft_model_name: raise HTTPException( 400, "draft_model_name was not found inside the draft object." ) load_data["draft"]["draft_model_dir"] = unwrap( get_draft_model_config().get("draft_model_dir"), "models" ) if not model_path.exists(): raise HTTPException(400, "model_path does not exist. Check model_name?") MODEL_CONTAINER = ModelContainer(model_path.resolve(), False, **load_data) async def generator(): """Generator for the loading process.""" model_type = "draft" if MODEL_CONTAINER.draft_config else "model" load_status = MODEL_CONTAINER.load_gen(load_progress) try: for module, modules in load_status: if await request.is_disconnected(): break if module == 0: loading_bar: IncrementalBar = IncrementalBar("Modules", max=modules) elif module == modules: loading_bar.next() loading_bar.finish() response = ModelLoadResponse( model_type=model_type, module=module, modules=modules, status="finished", ) yield get_sse_packet(response.model_dump_json()) # Switch to model progress if the draft model is loaded if MODEL_CONTAINER.draft_config: model_type = "model" else: loading_bar.next() response = ModelLoadResponse( model_type=model_type, module=module, modules=modules, status="processing", ) yield get_sse_packet(response.model_dump_json()) except CancelledError: logger.error( "Model load cancelled by user. " "Please make sure to run unload to free up resources." ) except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse(generator(), media_type="text/event-stream") # Unload model endpoint @app.post( "/v1/model/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_model(): """Unloads the currently loaded model.""" global MODEL_CONTAINER MODEL_CONTAINER.unload() MODEL_CONTAINER = None @app.get("/v1/templates", dependencies=[Depends(check_api_key)]) @app.get("/v1/template/list", dependencies=[Depends(check_api_key)]) async def get_templates(): templates = get_all_templates() template_strings = list(map(lambda template: template.stem, templates)) return TemplateList(data=template_strings) # Lora list endpoint @app.get("/v1/loras", dependencies=[Depends(check_api_key)]) @app.get("/v1/lora/list", dependencies=[Depends(check_api_key)]) async def get_all_loras(): """Lists all LoRAs in the lora directory.""" lora_path = pathlib.Path(unwrap(get_lora_config().get("lora_dir"), "loras")) loras = get_lora_list(lora_path.resolve()) return loras # Currently loaded loras endpoint @app.get( "/v1/lora", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_active_loras(): """Returns the currently loaded loras.""" active_loras = LoraList( data=list( map( lambda lora: LoraCard( id=pathlib.Path(lora.lora_path).parent.name, scaling=lora.lora_scaling * lora.lora_r / lora.lora_alpha, ), MODEL_CONTAINER.active_loras, ) ) ) return active_loras # Load lora endpoint @app.post( "/v1/lora/load", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def load_lora(data: LoraLoadRequest): """Loads a LoRA into the model container.""" if not data.loras: raise HTTPException(400, "List of loras to load is not found.") lora_dir = pathlib.Path(unwrap(get_lora_config().get("lora_dir"), "loras")) if not lora_dir.exists(): raise 
HTTPException( 400, "A parent lora directory does not exist. Check your config.yml?", ) # Clean-up existing loras if present if len(MODEL_CONTAINER.active_loras) > 0: MODEL_CONTAINER.unload(True) result = MODEL_CONTAINER.load_loras(lora_dir, **data.model_dump()) return LoraLoadResponse( success=unwrap(result.get("success"), []), failure=unwrap(result.get("failure"), []), ) # Unload lora endpoint @app.post( "/v1/lora/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_loras(): """Unloads the currently loaded loras.""" MODEL_CONTAINER.unload(True) # Encode tokens endpoint @app.post( "/v1/token/encode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def encode_tokens(data: TokenEncodeRequest): """Encodes a string into tokens.""" raw_tokens = MODEL_CONTAINER.get_tokens(data.text, None, **data.get_params()) # Have to use this if check otherwise Torch's tensors error out # with a boolean issue tokens = raw_tokens[0].tolist() if raw_tokens is not None else [] response = TokenEncodeResponse(tokens=tokens, length=len(tokens)) return response # Decode tokens endpoint @app.post( "/v1/token/decode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def decode_tokens(data: TokenDecodeRequest): """Decodes tokens into a string.""" message = MODEL_CONTAINER.get_tokens(None, data.tokens, **data.get_params()) response = TokenDecodeResponse(text=unwrap(message, "")) return response # Completions endpoint @app.post( "/v1/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_completion(request: Request, data: CompletionRequest): """Generates a completion from a prompt.""" model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.prompt, list): data.prompt = "\n".join(data.prompt) if data.stream: async def generator(): """Generator for the generation process.""" try: new_generation = MODEL_CONTAINER.generate_gen( data.prompt, **data.to_gen_params() ) for part, prompt_tokens, completion_tokens in new_generation: if await request.is_disconnected(): break response = create_completion_response( part, prompt_tokens, completion_tokens, model_path.name ) yield get_sse_packet(response.model_dump_json()) # Yield a finish response on successful generation yield get_sse_packet("[DONE]") except CancelledError: logger.error("Completion request cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, data.prompt, **data.to_gen_params()) ) response = create_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response # Chat completions endpoint @app.post( "/v1/chat/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_chat_completion(request: Request, data: ChatCompletionRequest): """Generates a chat completion from a prompt.""" if MODEL_CONTAINER.prompt_template is None: raise HTTPException( 422, "This endpoint is disabled because a prompt template is not set.", ) model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.messages, str): prompt = data.messages else: try: special_tokens_dict = MODEL_CONTAINER.get_special_tokens( unwrap(data.add_bos_token, True), unwrap(data.ban_eos_token, False), ) prompt = 
get_prompt_from_template( data.messages, MODEL_CONTAINER.prompt_template, data.add_generation_prompt, special_tokens_dict, ) except KeyError as exc: raise HTTPException( 400, "Could not find a Conversation from prompt template " f"'{MODEL_CONTAINER.prompt_template.name}'. " "Check your spelling?", ) from exc except TemplateError as exc: raise HTTPException( 400, f"TemplateError: {str(exc)}", ) from exc if data.stream: const_id = f"chatcmpl-{uuid4().hex}" async def generator(): """Generator for the generation process.""" try: new_generation = MODEL_CONTAINER.generate_gen( prompt, **data.to_gen_params() ) for part, _, _ in new_generation: if await request.is_disconnected(): break response = create_chat_completion_stream_chunk( const_id, part, model_path.name ) yield get_sse_packet(response.model_dump_json()) # Yield a finish response on successful generation finish_response = create_chat_completion_stream_chunk( const_id, finish_reason="stop" ) yield get_sse_packet(finish_response.model_dump_json()) except CancelledError: logger.error("Chat completion cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, prompt, **data.to_gen_params()) ) response = create_chat_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response def entrypoint(args: Optional[dict] = None): """Entry function for program startup""" global MODEL_CONTAINER # Load from YAML config read_config_from_file(pathlib.Path("config.yml")) # Parse and override config from args if args is None: parser = init_argparser() args = convert_args_to_dict(parser.parse_args(), parser)
override_config_from_args(args)
5
2023-11-10 05:54:02+00:00
16k
ShipBit/wingman-ai
services/tower.py
[ { "identifier": "MissingApiKeyException", "path": "exceptions.py", "snippet": "class MissingApiKeyException(Exception):\n pass" }, { "identifier": "OpenAiWingman", "path": "wingmen/open_ai_wingman.py", "snippet": "class OpenAiWingman(Wingman):\n \"\"\"Our OpenAI Wingman base gives you everything you need to interact with OpenAI's various APIs.\n\n It transcribes speech to text using Whisper, uses the Completion API for conversation and implements the Tools API to execute functions.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n super().__init__(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n )\n\n self.openai: OpenAi = None # validate will set this\n \"\"\"Our OpenAI API wrapper\"\"\"\n\n # every conversation starts with the \"context\" that the user has configured\n self.messages = [\n {\"role\": \"system\", \"content\": self.config[\"openai\"].get(\"context\")}\n ]\n \"\"\"The conversation history that is used for the GPT calls\"\"\"\n\n self.edge_tts = EdgeTTS(app_root_dir)\n self.last_transcript_locale = None\n self.elevenlabs_api_key = None\n self.azure_keys = {\n \"tts\": None,\n \"whisper\": None,\n \"conversation\": None,\n \"summarize\": None,\n }\n self.stt_provider = self.config[\"features\"].get(\"stt_provider\", None)\n self.conversation_provider = self.config[\"features\"].get(\n \"conversation_provider\", None\n )\n self.summarize_provider = self.config[\"features\"].get(\n \"summarize_provider\", None\n )\n\n def validate(self):\n errors = super().validate()\n openai_api_key = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"openai\",\n friendly_key_name=\"OpenAI API key\",\n prompt_if_missing=True,\n )\n if not openai_api_key:\n errors.append(\n \"Missing 'openai' API key. Please provide a valid key in the settings.\"\n )\n else:\n openai_organization = self.config[\"openai\"].get(\"organization\")\n openai_base_url = self.config[\"openai\"].get(\"base_url\")\n self.openai = OpenAi(openai_api_key, openai_organization, openai_base_url)\n\n self.__validate_elevenlabs_config(errors)\n\n self.__validate_azure_config(errors)\n\n return errors\n\n def __validate_elevenlabs_config(self, errors):\n if self.tts_provider == \"elevenlabs\":\n self.elevenlabs_api_key = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"elevenlabs\",\n friendly_key_name=\"Elevenlabs API key\",\n prompt_if_missing=True,\n )\n if not self.elevenlabs_api_key:\n errors.append(\n \"Missing 'elevenlabs' API key. Please provide a valid key in the settings or use another tts_provider.\"\n )\n return\n elevenlabs_settings = self.config.get(\"elevenlabs\")\n if not elevenlabs_settings:\n errors.append(\n \"Missing 'elevenlabs' section in config. Please provide a valid config or change the TTS provider.\"\n )\n return\n if not elevenlabs_settings.get(\"model\"):\n errors.append(\"Missing 'model' setting in 'elevenlabs' config.\")\n return\n voice_settings = elevenlabs_settings.get(\"voice\")\n if not voice_settings:\n errors.append(\n \"Missing 'voice' section in 'elevenlabs' config. Please provide a voice configuration as shown in our example config.\"\n )\n return\n if not voice_settings.get(\"id\") and not voice_settings.get(\"name\"):\n errors.append(\n \"Missing 'id' or 'name' in 'voice' section of 'elevenlabs' config. 
Please provide a valid name or id for the voice in your config.\"\n )\n\n def __validate_azure_config(self, errors):\n if (\n self.tts_provider == \"azure\"\n or self.stt_provider == \"azure\"\n or self.conversation_provider == \"azure\"\n or self.summarize_provider == \"azure\"\n ):\n azure_settings = self.config.get(\"azure\")\n if not azure_settings:\n errors.append(\n \"Missing 'azure' section in config. Please provide a valid config.\"\n )\n return\n\n if self.tts_provider == \"azure\":\n self.azure_keys[\"tts\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_tts\",\n friendly_key_name=\"Azure TTS API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"tts\"]:\n errors.append(\n \"Missing 'azure' tts API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.stt_provider == \"azure\":\n self.azure_keys[\"whisper\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_whisper\",\n friendly_key_name=\"Azure Whisper API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"whisper\"]:\n errors.append(\n \"Missing 'azure' whisper API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.conversation_provider == \"azure\":\n self.azure_keys[\"conversation\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_conversation\",\n friendly_key_name=\"Azure Conversation API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"conversation\"]:\n errors.append(\n \"Missing 'azure' conversation API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.summarize_provider == \"azure\":\n self.azure_keys[\"summarize\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_summarize\",\n friendly_key_name=\"Azure Summarize API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"summarize\"]:\n errors.append(\n \"Missing 'azure' summarize API key. Please provide a valid key in the settings.\"\n )\n return\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the recorded audio to text using the OpenAI Whisper API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. 
This is a recording of what you you said.\n\n Returns:\n str | None: The transcript of the audio file or None if the transcription failed.\n \"\"\"\n detect_language = self.config[\"edge_tts\"].get(\"detect_language\")\n\n response_format = (\n \"verbose_json\" # verbose_json will return the language detected in the transcript.\n if self.tts_provider == \"edge_tts\" and detect_language\n else \"json\"\n )\n\n azure_config = None\n if self.stt_provider == \"azure\":\n azure_config = self._get_azure_config(\"whisper\")\n\n transcript = self.openai.transcribe(\n audio_input_wav, response_format=response_format, azure_config=azure_config\n )\n\n locale = None\n # skip the GPT call if we didn't change the language\n if (\n response_format == \"verbose_json\"\n and transcript\n and transcript.language != self.last_transcript_locale # type: ignore\n ):\n printr.print(\n f\" EdgeTTS detected language '{transcript.language}'.\", tags=\"info\" # type: ignore\n )\n locale = self.__ask_gpt_for_locale(transcript.language) # type: ignore\n\n return transcript.text if transcript else None, locale\n\n def _get_azure_config(self, section: str):\n azure_api_key = self.azure_keys[section]\n azure_config = AzureConfig(\n api_key=azure_api_key,\n api_base_url=self.config[\"azure\"]\n .get(section, {})\n .get(\"api_base_url\", None),\n api_version=self.config[\"azure\"].get(section, {}).get(\"api_version\", None),\n deployment_name=self.config[\"azure\"]\n .get(section, {})\n .get(\"deployment_name\", None),\n )\n\n return azure_config\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Gets the response for a given transcript.\n\n This function interprets the transcript, runs instant commands if triggered,\n calls the OpenAI API when needed, processes any tool calls, and generates the final response.\n\n Args:\n transcript (str): The user's spoken text transcribed.\n\n Returns:\n A tuple of strings representing the response to a function call and an instant response.\n \"\"\"\n self.last_transcript_locale = locale\n self._add_user_message(transcript)\n\n instant_response = self._try_instant_activation(transcript)\n if instant_response:\n return instant_response, instant_response\n\n completion = self._gpt_call()\n\n if completion is None:\n return None, None\n\n response_message, tool_calls = self._process_completion(completion)\n\n # do not tamper with this message as it will lead to 400 errors!\n self.messages.append(response_message)\n\n if tool_calls:\n instant_response = await self._handle_tool_calls(tool_calls)\n if instant_response:\n return None, instant_response\n\n summarize_response = self._summarize_function_calls()\n return self._finalize_response(str(summarize_response))\n\n return response_message.content, response_message.content\n\n def _add_user_message(self, content: str):\n \"\"\"Shortens the conversation history if needed and adds a user message to it.\n\n Args:\n content (str): The message content to add.\n role (str): The role of the message sender (\"user\", \"assistant\", \"function\" or \"tool\").\n tool_call_id (Optional[str]): The identifier for the tool call, if applicable.\n name (Optional[str]): The name of the function associated with the tool call, if applicable.\n \"\"\"\n msg = {\"role\": \"user\", \"content\": content}\n self._cleanup_conversation_history()\n self.messages.append(msg)\n\n def _cleanup_conversation_history(self):\n \"\"\"Cleans up the conversation history by removing messages that are too 
old.\"\"\"\n remember_messages = self.config.get(\"features\", {}).get(\n \"remember_messages\", None\n )\n\n if remember_messages is None or len(self.messages) == 0:\n return 0 # Configuration not set, nothing to delete.\n\n # The system message aka `context` does not count\n context_offset = (\n 1 if self.messages and self.messages[0][\"role\"] == \"system\" else 0\n )\n\n # Find the cutoff index where to end deletion, making sure to only count 'user' messages towards the limit starting with newest messages.\n cutoff_index = len(self.messages) - 1\n user_message_count = 0\n for message in reversed(self.messages):\n if self.__get_message_role(message) == \"user\":\n user_message_count += 1\n if user_message_count == remember_messages:\n break # Found the cutoff point.\n cutoff_index -= 1\n\n # If messages below the keep limit, don't delete anything.\n if user_message_count < remember_messages:\n return 0\n\n total_deleted_messages = cutoff_index - context_offset # Messages to delete.\n\n # Remove the messages before the cutoff index, exclusive of the system message.\n del self.messages[context_offset:cutoff_index]\n\n # Optional debugging printout.\n if self.debug and total_deleted_messages > 0:\n printr.print(\n f\"Deleted {total_deleted_messages} messages from the conversation history.\",\n tags=\"warn\",\n )\n\n return total_deleted_messages\n\n def reset_conversation_history(self):\n \"\"\"Resets the conversation history by removing all messages except for the initial system message.\"\"\"\n del self.messages[1:]\n\n def _try_instant_activation(self, transcript: str) -> str:\n \"\"\"Tries to execute an instant activation command if present in the transcript.\n\n Args:\n transcript (str): The transcript to check for an instant activation command.\n\n Returns:\n str: The response to the instant command or None if no such command was found.\n \"\"\"\n command = self._execute_instant_activation_command(transcript)\n if command:\n response = self._select_command_response(command)\n return response\n return None\n\n def _gpt_call(self):\n \"\"\"Makes the primary GPT call with the conversation history and tools enabled.\n\n Returns:\n The GPT completion object or None if the call fails.\n \"\"\"\n if self.debug:\n printr.print(\n f\" Calling GPT with {(len(self.messages) - 1)} messages (excluding context)\",\n tags=\"info\",\n )\n\n azure_config = None\n if self.conversation_provider == \"azure\":\n azure_config = self._get_azure_config(\"conversation\")\n\n return self.openai.ask(\n messages=self.messages,\n tools=self._build_tools(),\n model=self.config[\"openai\"].get(\"conversation_model\"),\n azure_config=azure_config,\n )\n\n def _process_completion(self, completion):\n \"\"\"Processes the completion returned by the GPT call.\n\n Args:\n completion: The completion object from an OpenAI call.\n\n Returns:\n A tuple containing the message response and tool calls from the completion.\n \"\"\"\n response_message = completion.choices[0].message\n\n content = response_message.content\n if content is None:\n response_message.content = \"\"\n\n return response_message, response_message.tool_calls\n\n async def _handle_tool_calls(self, tool_calls):\n \"\"\"Processes all the tool calls identified in the response message.\n\n Args:\n tool_calls: The list of tool calls to process.\n\n Returns:\n str: The immediate response from processed tool calls or None if there are no immediate responses.\n \"\"\"\n instant_response = None\n function_response = \"\"\n\n for tool_call in tool_calls:\n 
function_name = tool_call.function.name\n function_args = json.loads(tool_call.function.arguments)\n (\n function_response,\n instant_response,\n ) = await self._execute_command_by_function_call(\n function_name, function_args\n )\n\n msg = {\"role\": \"tool\", \"content\": function_response}\n if tool_call.id is not None:\n msg[\"tool_call_id\"] = tool_call.id\n if function_name is not None:\n msg[\"name\"] = function_name\n\n # Don't use self._add_user_message_to_history here because we never want to skip this because of history limitions\n self.messages.append(msg)\n\n return instant_response\n\n def _summarize_function_calls(self):\n \"\"\"Summarizes the function call responses using the GPT model specified for summarization in the configuration.\n\n Returns:\n The content of the GPT response to the function call summaries.\n \"\"\"\n azure_config = None\n if self.summarize_provider == \"azure\":\n azure_config = self._get_azure_config(\"summarize\")\n\n summarize_model = self.config[\"openai\"].get(\"summarize_model\")\n summarize_response = self.openai.ask(\n messages=self.messages,\n model=summarize_model,\n azure_config=azure_config,\n )\n\n if summarize_response is None:\n return None\n\n # do not tamper with this message as it will lead to 400 errors!\n message = summarize_response.choices[0].message\n self.messages.append(message)\n return message.content\n\n def _finalize_response(self, summarize_response: str) -> tuple[str, str]:\n \"\"\"Finalizes the response based on the call of the second (summarize) GPT call.\n\n Args:\n summarize_response (str): The response content from the second GPT call.\n\n Returns:\n A tuple containing the final response to the user.\n \"\"\"\n if summarize_response is None:\n return self.messages[-1][\"content\"], self.messages[-1][\"content\"]\n return summarize_response, summarize_response\n\n async def _execute_command_by_function_call(\n self, function_name: str, function_args: dict[str, any]\n ) -> tuple[str, str]:\n \"\"\"\n Uses an OpenAI function call to execute a command. 
If it's an instant activation_command, one if its reponses will be played.\n\n Args:\n function_name (str): The name of the function to be executed.\n function_args (dict[str, any]): The arguments to pass to the function being executed.\n\n Returns:\n A tuple containing two elements:\n - function_response (str): The text response or result obtained after executing the function.\n - instant_response (str): An immediate response or action to be taken, if any (e.g., play audio).\n \"\"\"\n function_response = \"\"\n instant_reponse = \"\"\n if function_name == \"execute_command\":\n # get the command based on the argument passed by GPT\n command = self._get_command(function_args[\"command_name\"])\n # execute the command\n function_response = self._execute_command(command)\n # if the command has responses, we have to play one of them\n if command and command.get(\"responses\"):\n instant_reponse = self._select_command_response(command)\n await self._play_to_user(instant_reponse)\n\n return function_response, instant_reponse\n\n async def _play_to_user(self, text: str):\n \"\"\"Plays audio to the user using the configured TTS Provider (default: OpenAI TTS).\n Also adds sound effects if enabled in the configuration.\n\n Args:\n text (str): The text to play as audio.\n \"\"\"\n\n if self.tts_provider == \"edge_tts\":\n await self._play_with_edge_tts(text)\n elif self.tts_provider == \"elevenlabs\":\n self._play_with_elevenlabs(text)\n elif self.tts_provider == \"azure\":\n self._play_with_azure(text)\n else:\n self._play_with_openai(text)\n\n def _play_with_openai(self, text):\n response = self.openai.speak(text, self.config[\"openai\"].get(\"tts_voice\"))\n if response is not None:\n self.audio_player.stream_with_effects(response.content, self.config)\n\n def _play_with_azure(self, text):\n azure_config = self.config[\"azure\"].get(\"tts\", None)\n\n if azure_config is None:\n return\n\n speech_config = speechsdk.SpeechConfig(\n subscription=self.azure_keys[\"tts\"],\n region=azure_config[\"region\"],\n )\n speech_config.speech_synthesis_voice_name = azure_config[\"voice\"]\n\n if azure_config[\"detect_language\"]:\n auto_detect_source_language_config = (\n speechsdk.AutoDetectSourceLanguageConfig()\n )\n\n speech_synthesizer = speechsdk.SpeechSynthesizer(\n speech_config=speech_config,\n audio_config=None,\n auto_detect_source_language_config=auto_detect_source_language_config\n if azure_config[\"detect_language\"]\n else None,\n )\n\n result = speech_synthesizer.speak_text_async(text).get()\n if result is not None:\n self.audio_player.stream_with_effects(result.audio_data, self.config)\n\n async def _play_with_edge_tts(self, text: str):\n edge_config = self.config[\"edge_tts\"]\n\n tts_voice = edge_config.get(\"tts_voice\")\n detect_language = edge_config.get(\"detect_language\")\n if detect_language:\n gender = edge_config.get(\"gender\")\n tts_voice = await self.edge_tts.get_same_random_voice_for_language(\n gender, self.last_transcript_locale\n )\n\n communicate, output_file = await self.edge_tts.generate_speech(\n text, voice=tts_voice\n )\n audio, sample_rate = self.audio_player.get_audio_from_file(output_file)\n\n self.audio_player.stream_with_effects((audio, sample_rate), self.config)\n\n def _play_with_elevenlabs(self, text: str):\n # presence already validated in validate()\n elevenlabs_config = self.config[\"elevenlabs\"]\n # validate() already checked that either id or name is set\n voice_id = elevenlabs_config[\"voice\"].get(\"id\")\n voice_name = 
elevenlabs_config[\"voice\"].get(\"name\")\n\n voice_settings = elevenlabs_config.get(\"voice_settings\", {})\n user = ElevenLabsUser(self.elevenlabs_api_key)\n model = elevenlabs_config.get(\"model\", \"eleven_multilingual_v2\")\n\n voice: (\n ElevenLabsVoice\n | ElevenLabsDesignedVoice\n | ElevenLabsClonedVoice\n | ElevenLabsProfessionalVoice\n ) = None\n if voice_id:\n voice = user.get_voice_by_ID(voice_id)\n else:\n voice = user.get_voices_by_name(voice_name)[0]\n\n # todo: add start/end callbacks to play Quindar beep even if use_sound_effects is disabled\n playback_options = PlaybackOptions(runInBackground=True)\n generation_options = GenerationOptions(\n model=model,\n latencyOptimizationLevel=elevenlabs_config.get(\"latency\", 0),\n style=voice_settings.get(\"style\", 0),\n use_speaker_boost=voice_settings.get(\"use_speaker_boost\", True),\n )\n stability = voice_settings.get(\"stability\")\n if stability is not None:\n generation_options.stability = stability\n\n similarity_boost = voice_settings.get(\"similarity_boost\")\n if similarity_boost is not None:\n generation_options.similarity_boost = similarity_boost\n\n style = voice_settings.get(\"style\")\n if style is not None and model != \"eleven_turbo_v2\":\n generation_options.style = style\n\n use_sound_effects = elevenlabs_config.get(\"use_sound_effects\", False)\n if use_sound_effects:\n audio_bytes, _history_id = voice.generate_audio_v2(\n prompt=text,\n generationOptions=generation_options,\n )\n if audio_bytes:\n self.audio_player.stream_with_effects(audio_bytes, self.config)\n else:\n voice.generate_stream_audio_v2(\n prompt=text,\n playbackOptions=playback_options,\n generationOptions=generation_options,\n )\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Does what Wingman base does, but always returns \"Ok\" instead of a command response.\n Otherwise the AI will try to respond to the command and generate a \"duplicate\" response for instant_activation commands.\n \"\"\"\n super()._execute_command(command)\n return \"Ok\"\n\n def _build_tools(self) -> list[dict]:\n \"\"\"\n Builds a tool for each command that is not instant_activation.\n\n Returns:\n list[dict]: A list of tool descriptors in OpenAI format.\n \"\"\"\n commands = [\n command[\"name\"]\n for command in self.config.get(\"commands\", [])\n if not command.get(\"instant_activation\")\n ]\n tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"execute_command\",\n \"description\": \"Executes a command\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"command_name\": {\n \"type\": \"string\",\n \"description\": \"The command to execute\",\n \"enum\": commands,\n },\n },\n \"required\": [\"command_name\"],\n },\n },\n },\n ]\n return tools\n\n def __ask_gpt_for_locale(self, language: str) -> str:\n \"\"\"OpenAI TTS returns a natural language name for the language of the transcript, e.g. \"german\" or \"english\".\n This method uses ChatGPT to find the corresponding locale, e.g. \"de-DE\" or \"en-EN\".\n\n Args:\n language (str): The natural, lowercase language name returned by OpenAI TTS. Thank you for that btw.. WTF OpenAI?\n \"\"\"\n\n response = self.openai.ask(\n messages=[\n {\n \"content\": \"\"\"\n I'll say a natural language name in lowercase and you'll just return the IETF country code / locale for this language.\n Your answer always has exactly 2 lowercase letters, a dash, then two more letters in uppercase.\n If I say \"german\", you answer with \"de-DE\". 
If I say \"russian\", you answer with \"ru-RU\".\n If it's ambiguous and you don't know which locale to pick (\"en-GB\" vs \"en-US\"), you pick the most commonly used one.\n You only answer with valid country codes according to most common standards.\n If you can't, you respond with \"None\".\n \"\"\",\n \"role\": \"system\",\n },\n {\n \"content\": language,\n \"role\": \"user\",\n },\n ],\n model=\"gpt-3.5-turbo-1106\",\n )\n answer = response.choices[0].message.content\n\n if answer == \"None\":\n return None\n\n printr.print(\n f\" ChatGPT says this language maps to locale '{answer}'.\", tags=\"info\"\n )\n return answer\n\n def __get_message_role(self, message):\n \"\"\"Helper method to get the role of the message regardless of its type.\"\"\"\n if isinstance(message, Mapping):\n return message.get(\"role\")\n elif hasattr(message, \"role\"):\n return message.role\n else:\n raise TypeError(\n f\"Message is neither a mapping nor has a 'role' attribute: {message}\"\n )" }, { "identifier": "Wingman", "path": "wingmen/wingman.py", "snippet": "class Wingman(FileCreator):\n \"\"\"The \"highest\" Wingman base class in the chain. It does some very basic things but is meant to be 'virtual', and so are most its methods, so you'll probably never instantiate it directly.\n\n Instead, you'll create a custom wingman that inherits from this (or a another subclass of it) and override its methods if needed.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n \"\"\"The constructor of the Wingman class. You can override it in your custom wingman.\n\n Args:\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n app_root_dir (str): The path to the root directory of the app. This is where the Wingman executable lives.\n \"\"\"\n\n super().__init__(app_root_dir=app_root_dir, subdir=\"wingman_data\")\n\n self.config = config\n \"\"\"All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\"\"\"\n\n self.secret_keeper = secret_keeper\n \"\"\"A service that allows you to store and retrieve secrets like API keys. It can prompt the user for secrets if necessary.\"\"\"\n\n self.name = name\n \"\"\"The name of the wingman. This is the key you gave it in the config, e.g. \"atc\".\"\"\"\n\n self.audio_player = AudioPlayer()\n \"\"\"A service that allows you to play audio files and add sound effects to them.\"\"\"\n\n self.execution_start: None | float = None\n \"\"\"Used for benchmarking executon times. The timer is (re-)started whenever the process function starts.\"\"\"\n\n self.debug: bool = self.config[\"features\"].get(\"debug_mode\", False)\n \"\"\"If enabled, the Wingman will skip executing any keypresses. It will also print more debug messages and benchmark results.\"\"\"\n\n self.tts_provider = self.config[\"features\"].get(\"tts_provider\")\n \"\"\"The name of the TTS provider you configured in the config.yaml\"\"\"\n\n self.app_root_dir = app_root_dir\n \"\"\"The path to the root directory of the app. 
This is where the Wingman executable lives.\"\"\"\n\n @staticmethod\n def create_dynamically(\n module_path: str,\n class_name: str,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n **kwargs,\n ):\n \"\"\"Dynamically creates a Wingman instance from a module path and class name\n\n Args:\n module_path (str): The module path, e.g. wingmen.open_ai_wingman. It's like the filepath from root to your custom-wingman.py but with dots instead of slashes and without the .py extension. Case-sensitive!\n class_name (str): The name of the class inside your custom-wingman.py, e.g. OpenAiWingman. Case-sensitive!\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n \"\"\"\n\n module = import_module(module_path)\n DerivedWingmanClass = getattr(module, class_name)\n instance = DerivedWingmanClass(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n **kwargs,\n )\n return instance\n\n def get_record_key(self) -> str:\n \"\"\"Returns the activation or \"push-to-talk\" key for this Wingman.\"\"\"\n return self.config.get(\"record_key\", None)\n\n def print_execution_time(self, reset_timer=False):\n \"\"\"Prints the current time since the execution started (in seconds).\"\"\"\n if self.execution_start:\n execution_stop = time.perf_counter()\n elapsed_seconds = execution_stop - self.execution_start\n printr.print(f\"...took {elapsed_seconds:.2f}s\", tags=\"info\")\n if reset_timer:\n self.start_execution_benchmark()\n\n def start_execution_benchmark(self):\n \"\"\"Starts the execution benchmark timer.\"\"\"\n self.execution_start = time.perf_counter()\n\n # ──────────────────────────────────── Hooks ─────────────────────────────────── #\n\n def validate(self) -> list[str]:\n \"\"\"Use this function to validate params and config before the Wingman is started.\n If you add new config sections or entries to your custom wingman, you should validate them here.\n\n It's a good idea to collect all errors from the base class and not to swallow them first.\n\n If you return errors, your Wingman will be disabled by Tower and not be loaded.\n\n Returns:\n list[str]: A list of error messages or an empty list if everything is okay.\n \"\"\"\n return []\n\n # TODO: this should be async\n def prepare(self):\n \"\"\"This method is called only once when the Wingman is instantiated by Tower.\n It is run AFTER validate() so you can access validated params safely here.\n\n You can override it if you need to load async data from an API or file.\"\"\"\n pass\n\n def reset_conversation_history(self):\n \"\"\"This function is called when the user triggers the ResetConversationHistory command.\n It's a global command that should be implemented by every Wingman that keeps a message history.\n \"\"\"\n\n # ──────────────────────────── The main processing loop ──────────────────────────── #\n\n async def process(self, audio_input_wav: str):\n \"\"\"The main method that gets called when the wingman is activated. 
This method controls what your wingman actually does and you can override it if you want to.\n\n The base implementation here triggers the transcription and processing of the given audio input.\n If you don't need even transcription, you can just override this entire process method. If you want transcription but then do something in addition, you can override the listed hooks.\n\n Async so you can do async processing, e.g. send a request to an API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Hooks:\n - async _transcribe: transcribe the audio to text\n - async _get_response_for_transcript: process the transcript and return a text response\n - async _play_to_user: do something with the response, e.g. play it as audio\n \"\"\"\n\n self.start_execution_benchmark()\n\n process_result = None\n\n if self.debug:\n printr.print(\"Starting transcription...\", tags=\"info\")\n\n # transcribe the audio.\n transcript, locale = await self._transcribe(audio_input_wav)\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n if transcript:\n printr.print(f\">> (You): {transcript}\", tags=\"violet\")\n\n if self.debug:\n printr.print(\"Getting response for transcript...\", tags=\"info\")\n\n # process the transcript further. This is where you can do your magic. Return a string that is the \"answer\" to your passed transcript.\n process_result, instant_response = await self._get_response_for_transcript(\n transcript, locale\n )\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n actual_response = instant_response or process_result\n printr.print(f\"<< ({self.name}): {actual_response}\", tags=\"green\")\n\n if self.debug:\n printr.print(\"Playing response back to user...\", tags=\"info\")\n\n # the last step in the chain. You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n await self._play_to_user(str(process_result))\n\n if self.debug:\n self.print_execution_time()\n\n # ───────────────── virtual methods / hooks ───────────────── #\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the audio to text. You can override this method if you want to use a different transcription service.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Returns:\n tuple[str | None, str | None]: The transcript of the audio file and the detected language as locale (if determined).\n \"\"\"\n return None, None\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Processes the transcript and return a response as text. This where you'll do most of your work.\n Pass the transcript to AI providers and build a conversation. Call commands or APIs. Play temporary results to the user etc.\n\n\n Args:\n transcript (str): The user's spoken text transcribed as text.\n locale (str | None): The language that was detected to be used in the transcript, e.g. \"de-DE\".\n\n Returns:\n A tuple of strings representing the response to a function call and/or an instant response.\n \"\"\"\n return (\"\", \"\")\n\n async def _play_to_user(self, text: str):\n \"\"\"You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n\n Args:\n text (str): The response of your _get_response_for_transcript. 
This is usually the \"response\" from conversation with the AI.\n \"\"\"\n pass\n\n # ───────────────────────────────── Commands ─────────────────────────────── #\n\n def _get_command(self, command_name: str) -> dict | None:\n \"\"\"Extracts the command with the given name\n\n Args:\n command_name (str): the name of the command you used in the config\n\n Returns:\n {}: The command object from the config\n \"\"\"\n\n command = next(\n (\n item\n for item in self.config.get(\"commands\", [])\n if item[\"name\"] == command_name\n ),\n None,\n )\n return command\n\n def _select_command_response(self, command: dict) -> str | None:\n \"\"\"Returns one of the configured responses of the command. This base implementation returns a random one.\n\n Args:\n command (dict): The command object from the config\n\n Returns:\n str: A random response from the command's responses list in the config.\n \"\"\"\n command_responses = command.get(\"responses\", None)\n if (command_responses is None) or (len(command_responses) == 0):\n return None\n\n return random.choice(command_responses)\n\n def _execute_instant_activation_command(self, transcript: str) -> dict | None:\n \"\"\"Uses a fuzzy string matching algorithm to match the transcript to a configured instant_activation command and executes it immediately.\n\n Args:\n transcript (text): What the user said, transcripted to text. Needs to be similar to one of the defined instant_activation phrases to work.\n\n Returns:\n {} | None: The executed instant_activation command.\n \"\"\"\n\n instant_activation_commands = [\n command\n for command in self.config.get(\"commands\", [])\n if command.get(\"instant_activation\")\n ]\n\n # check if transcript matches any instant activation command. Each command has a list of possible phrases\n for command in instant_activation_commands:\n for phrase in command.get(\"instant_activation\"):\n ratio = SequenceMatcher(\n None,\n transcript.lower(),\n phrase.lower(),\n ).ratio()\n if (\n ratio > 0.8\n ): # if the ratio is higher than 0.8, we assume that the command was spoken\n self._execute_command(command)\n\n if command.get(\"responses\"):\n return command\n return None\n return None\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Triggers the execution of a command. This base implementation executes the keypresses defined in the command.\n\n Args:\n command (dict): The command object from the config to execute\n\n Returns:\n str: the selected response from the command's responses list in the config. \"Ok\" if there are none.\n \"\"\"\n\n if not command:\n return \"Command not found\"\n\n printr.print(f\"❖ Executing command: {command.get('name')}\", tags=\"info\")\n\n if self.debug:\n printr.print(\n \"Skipping actual keypress execution in debug_mode...\", tags=\"warn\"\n )\n\n if len(command.get(\"keys\", [])) > 0 and not self.debug:\n self.execute_keypress(command)\n # TODO: we could do mouse_events here, too...\n\n # handle the global special commands:\n if command.get(\"name\", None) == \"ResetConversationHistory\":\n self.reset_conversation_history()\n\n if not self.debug:\n # in debug mode we already printed the separate execution times\n self.print_execution_time()\n\n return self._select_command_response(command) or \"Ok\"\n\n def execute_keypress(self, command: dict):\n \"\"\"Executes the keypresses defined in the command in order.\n\n pydirectinput uses SIGEVENTS to send keypresses to the OS. This lib seems to be the only way to send keypresses to games reliably.\n\n It only works on Windows. 
For MacOS, we fall back to PyAutoGUI (which has the exact same API as pydirectinput is built on top of it).\n\n Args:\n command (dict): The command object from the config to execute\n \"\"\"\n\n for entry in command.get(\"keys\", []):\n if entry.get(\"modifier\"):\n key_module.keyDown(entry[\"modifier\"])\n\n if entry.get(\"hold\"):\n key_module.keyDown(entry[\"key\"])\n time.sleep(entry[\"hold\"])\n key_module.keyUp(entry[\"key\"])\n else:\n key_module.press(entry[\"key\"])\n\n if entry.get(\"modifier\"):\n key_module.keyUp(entry[\"modifier\"])\n\n if entry.get(\"wait\"):\n time.sleep(entry[\"wait\"])" }, { "identifier": "Printr", "path": "services/printr.py", "snippet": "class Printr(object):\n _instance = None\n\n LILA = \"\\033[95m\"\n BLUE = \"\\033[94m\"\n CYAN = \"\\033[96m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n RED = \"\\033[91m\"\n CLEAR = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n FAINT = \"\\033[2m\"\n NORMAL_WEIGHT = \"\\033[22m\"\n UNDERLINE = \"\\033[4m\"\n END_UNDERLINE = \"\\033[24m\"\n OVERLINE = \"\\033[53m\"\n END_OVERLINE = \"\\033[55m\"\n FRAMED = \"\\033[51m\"\n ENCIRCLED = \"\\033[52m\"\n DELETE_LINE = \"\\033[2K\\033[1G\"\n PREVIOUS_LINE = \"\\033[2F\"\n\n tags = [\n # {\"tagName\": \"bold\", \"font\": \"TkTextFont bold\"},\n {\"tagName\": \"info\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"warn\", \"foreground\": \"orange\"},\n {\"tagName\": \"err\", \"foreground\": \"red\"},\n\n {\"tagName\": \"green\", \"foreground\": \"#33cc33\"},\n {\"tagName\": \"blue\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"violet\", \"foreground\": \"#aa33dd\"},\n {\"tagName\": \"grey\", \"foreground\": \"grey\"}\n ]\n\n CHANNEL = Literal[\"main\", \"error\", \"warning\", \"info\"]\n OUTPUT_TYPES = None | ctk.StringVar | ctk.CTkTextbox\n\n _message_stacks: dict[CHANNEL, list] = dict(\n main=[],\n error=[],\n warning=[],\n info=[]\n )\n\n # NOTE this is a singleton class\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super(Printr, cls).__new__(cls)\n\n cls.out: dict[Printr.CHANNEL, Printr.OUTPUT_TYPES ] = dict(\n main=None,\n error=None,\n warning=None,\n info=None\n )\n return cls._instance\n\n\n def set_output(self, output_channel: CHANNEL, output_element: OUTPUT_TYPES):\n if isinstance(output_element, ctk.CTkTextbox):\n for tag in self.tags:\n output_element.tag_config(**tag)\n\n self.out[output_channel] = output_element\n\n msg_stack = self._message_stacks.get(output_channel, [])\n if len(msg_stack) > 0:\n msg = \"\\n\".join(msg_stack)\n self.print(msg, output_channel)\n # TODO: clear stack?\n for _ in range(len(msg_stack)):\n msg_stack.pop()\n\n\n\n def print(self, text, output_channel: CHANNEL = \"main\", tags=None, wait_for_gui=False, console_only=False):\n channel = self.out.get(output_channel, None)\n if channel and not console_only:\n if isinstance(channel, ctk.CTkTextbox):\n channel.configure(state=\"normal\")\n channel.insert(\"end\", f\"{text}\\n\", tags=tags)\n channel.see(\"end\")\n channel.configure(state=\"disabled\")\n else:\n # output type -> StringVar\n channel.set(text)\n elif wait_for_gui and not console_only:\n # message should only be shown in GUI\n # so add it to the queue to wait for GUI initialization\n self._message_stacks.get(output_channel, []).append(text)\n else:\n # no special output type -> terminal output\n print(text)\n\n\n def print_err(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"error\", wait_for_gui=wait_for_gui)\n\n def print_warn(self, text, wait_for_gui=True):\n self.print(text, 
output_channel=\"warning\", wait_for_gui=wait_for_gui)\n\n def print_info(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"info\", wait_for_gui=wait_for_gui)\n\n\n @staticmethod\n def clr(text, color_format):\n return f\"{color_format}{text}{Printr.CLEAR}\"\n\n @staticmethod\n def clr_print(text, color_format):\n print(Printr.clr(text, color_format))\n\n @staticmethod\n def sys_print(text, headline=\"\", color=RED, first_message=True):\n if first_message:\n print(\"\")\n if headline.strip():\n print(\n Printr.clr(f\"{Printr.BOLD}{headline}{Printr.NORMAL_WEIGHT}\", color)\n )\n else:\n print(Printr.PREVIOUS_LINE)\n print(Printr.clr(f\"⎢ {text}\", color))\n print(\"\")\n\n @staticmethod\n def err_print(text, first_message=True):\n Printr.sys_print(text, \"Something went wrong!\", first_message=first_message)\n\n @staticmethod\n def warn_print(text, first_message=True):\n Printr.sys_print(text, \"Please note:\", Printr.YELLOW, first_message)\n\n @staticmethod\n def info_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.BLUE, first_message)\n\n @staticmethod\n def hl_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.CYAN, first_message)\n\n @staticmethod\n def override_print(text):\n print(f\"{Printr.DELETE_LINE}{text}\")\n\n @staticmethod\n def box_start():\n print(\n f\"{Printr.CYAN}⎡{Printr.OVERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_OVERLINE}⎤\"\n )\n print(f\"⎢{Printr.CLEAR}\")\n\n @staticmethod\n def box_end():\n print(f\"{Printr.CYAN}⎢\")\n print(\n f\"⎣{Printr.UNDERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_UNDERLINE}⎦{Printr.CLEAR}\"\n )\n\n @staticmethod\n def box_print(text):\n print(f\"{Printr.CYAN}⎜{Printr.CLEAR} {text}\")" }, { "identifier": "SecretKeeper", "path": "services/secret_keeper.py", "snippet": "class SecretKeeper:\n def __init__(self, app_root_path: str):\n self.printr = Printr()\n self.system_config_path: str = os.path.join(app_root_path, SYSTEM_CONFIG_PATH)\n self.config_file = os.path.join(self.system_config_path, SECRETS_FILE)\n self.secrets = self.__load()\n if not self.secrets:\n self.secrets = {}\n\n def __load(self) -> dict[str, any]: # type: ignore\n parsed_config = None\n\n if os.path.exists(self.config_file) and os.path.isfile(self.config_file):\n with open(self.config_file, \"r\", encoding=\"UTF-8\") as stream:\n try:\n parsed_config = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not load ({SECRETS_FILE})\\n{str(e)}\", True\n )\n\n return parsed_config\n\n def save(self):\n \"\"\"Write all secrets to the file\"\"\"\n with open(self.config_file, \"w\", encoding=\"UTF-8\") as stream:\n try:\n yaml.dump(self.secrets, stream)\n return True\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not write ({SECRETS_FILE})\\n{str(e)}\", True\n )\n return False\n\n def retrieve(\n self,\n requester: str,\n key: str,\n friendly_key_name: str,\n prompt_if_missing: bool = True,\n ) -> str:\n \"\"\"Retrieve secret a secret and optionally prompt user for it if missing\"\"\"\n\n secret = self.secrets.get(key, None)\n if not secret and prompt_if_missing:\n # Prompt user for key\n dialog = ctk.CTkInputDialog(\n text=f\"Please enter '{friendly_key_name}':\",\n title=f\"{requester} needs to know a secret\",\n )\n secret = dialog.get_input()\n if secret:\n secret = secret.strip().replace(\"\\n\", 
\"\")\n self.secrets[key] = secret\n self.save()\n\n return secret" } ]
import copy
from exceptions import MissingApiKeyException
from wingmen.open_ai_wingman import OpenAiWingman
from wingmen.wingman import Wingman
from services.printr import Printr
from services.secret_keeper import SecretKeeper
12,282
printr = Printr()


class Tower:
    def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str):  # type: ignore
        self.config = config
        self.app_root_dir = app_root_dir
        self.secret_keeper = secret_keeper
self.key_wingman_dict: dict[str, Wingman] = {}
2
2023-11-15 09:36:06+00:00
16k
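Together, the fields above make up one repository-level next-line completion example: import_statement and cropped_code form the visible prefix, next_line is the target to predict, gold_snippet_index presumably marks which entry of the context list holds the definition the prediction depends on, and level (here "16k") presumably buckets the example by context length. A minimal sketch of how a prompt could be assembled from such a record follows; the concatenation order and the helper name are assumptions, only the field names come from the dump:

def build_prompt(record: dict, max_context_snippets: int = 3) -> tuple[str, str]:
    """Assemble a next-line completion prompt and its target from one record."""
    # Prepend a few retrieved snippets, each labelled with its origin.
    context_block = "\n\n".join(
        f"# {e['path']} :: {e['identifier']}\n{e['snippet']}"
        for e in record["context"][:max_context_snippets]
    )
    # The model sees the cross-file context, the imports, then the cropped file body ...
    prompt = f"{context_block}\n\n{record['import_statement']}\n\n{record['cropped_code']}"
    # ... and is asked to produce exactly this line next.
    target = record["next_line"]
    return prompt, target

Comparing a model's output against next_line, and checking whether the entry at gold_snippet_index survived any context truncation, would give a simple per-record evaluation.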
wjun0830/CGDETR
cg_detr/inference.py
[ { "identifier": "AverageMeter", "path": "utils/basic_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n\n def update(self, val, n=1):\n self.max = max(val, self.max)\n self.min = min(val, self.min)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "TestOptions", "path": "cg_detr/config.py", "snippet": "class TestOptions(BaseOptions):\n \"\"\"add additional options for evaluating\"\"\"\n\n def initialize(self):\n BaseOptions.initialize(self)\n # also need to specify --eval_split_name\n self.parser.add_argument(\"--eval_id\", type=str, help=\"evaluation id\")\n self.parser.add_argument(\"--eval_results_dir\", type=str, default=None,\n help=\"dir to save results, if not set, fall back to training results_dir\")\n self.parser.add_argument(\"--model_dir\", type=str,\n help=\"dir contains the model file, will be converted to absolute path afterwards\")" }, { "identifier": "build_model", "path": "cg_detr/model.py", "snippet": "def build_model(args):\n device = torch.device(args.device)\n\n transformer = build_transformer(args)\n position_embedding, txt_position_embedding = build_position_encoding(args)\n\n if args.a_feat_dir is None:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n else:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n aud_dim=args.a_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n\n matcher = build_matcher(args)\n weight_dict = {\"loss_span\": args.span_loss_coef,\n \"loss_giou\": args.giou_loss_coef,\n \"loss_label\": args.label_loss_coef,\n \"loss_saliency\": args.lw_saliency,\n \"loss_ms_align\": args.lw_ms_align,\n \"loss_distill\": args.lw_distill,\n \"loss_orthogonal_dummy\":args.lw_distill}\n if args.contrastive_align_loss:\n weight_dict[\"loss_contrastive_align\"] = args.contrastive_align_loss_coef\n\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items() if k != \"loss_saliency\"})\n weight_dict.update(aux_weight_dict)\n\n losses = ['spans', 'labels', 'saliency', 'ms_align', 'distill', 'orthogonal_dummy']\n if args.contrastive_align_loss:\n losses += [\"contrastive_align\"]\n \n # For highlight detection datasets\n use_matcher = not (args.dset_name in ['youtube_uni', 'tvsum'])\n \n criterion = SetCriterion(\n matcher=matcher, weight_dict=weight_dict, losses=losses,\n eos_coef=args.eos_coef, temperature=args.temperature,\n 
span_loss_type=args.span_loss_type, max_v_l=args.max_v_l,\n saliency_margin=args.saliency_margin, use_matcher=use_matcher, args=args\n )\n criterion.to(device)\n return model, criterion" }, { "identifier": "span_cxw_to_xx", "path": "cg_detr/span_utils.py", "snippet": "def span_cxw_to_xx(cxw_spans):\n \"\"\"\n Args:\n cxw_spans: tensor, (#windows, 2) or (..., 2), the last dim is a row denoting a window of format (center, width)\n\n >>> spans = torch.Tensor([[0.5000, 1.0000], [0.3000, 0.2000]])\n >>> span_cxw_to_xx(spans)\n tensor([[0.0000, 1.0000],\n [0.2000, 0.4000]])\n >>> spans = torch.Tensor([[[0.5000, 1.0000], [0.3000, 0.2000]]])\n >>> span_cxw_to_xx(spans)\n tensor([[[0.0000, 1.0000],\n [0.2000, 0.4000]]])\n \"\"\"\n x1 = cxw_spans[..., 0] - 0.5 * cxw_spans[..., 1]\n x2 = cxw_spans[..., 0] + 0.5 * cxw_spans[..., 1]\n return torch.stack([x1, x2], dim=-1)" }, { "identifier": "StartEndDataset", "path": "cg_detr/start_end_dataset.py", "snippet": "class StartEndDataset(Dataset):\n Q_FEAT_TYPES = [\"pooler_output\", \"last_hidden_state\"]\n \"\"\"One line in data loaded from data_path.\"\n {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17],\n \"relevant_windows\": [[26, 36]]\n }\n \"\"\"\n\n def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode=\"video\",\n normalize_v=True, normalize_t=True, load_labels=True,\n clip_len=2, max_windows=5, span_loss_type=\"l1\", txt_drop_ratio=0,\n dset_domain=None):\n self.dset_name = dset_name\n self.data_path = data_path\n self.data_ratio = data_ratio\n self.v_feat_dirs = v_feat_dirs \\\n if isinstance(v_feat_dirs, list) else [v_feat_dirs]\n self.q_feat_dir = q_feat_dir\n self.q_feat_type = q_feat_type\n if max_v_l == -1:\n max_v_l = 100000000\n if max_q_l == -1:\n max_q_l = 100\n self.max_q_l = max_q_l\n self.max_v_l = max_v_l\n self.ctx_mode = ctx_mode\n self.use_tef = \"tef\" in ctx_mode\n self.use_video = \"video\" in ctx_mode\n self.normalize_t = normalize_t\n self.normalize_v = normalize_v\n self.load_labels = load_labels\n self.clip_len = clip_len\n self.max_windows = max_windows # maximum number of windows to use as labels\n self.span_loss_type = span_loss_type\n self.txt_drop_ratio = txt_drop_ratio\n if \"val\" in data_path or \"test\" in data_path:\n assert txt_drop_ratio == 0\n\n\n # checks\n assert q_feat_type in self.Q_FEAT_TYPES\n\n # data\n self.data = self.load_data()\n \n # load specific domain data for tvsum dataset\n if self.dset_name in ['tvsum', 'tvsum_sfc']:\n target_domain = dset_domain\n assert target_domain in [\"BK\", \"BT\", \"DS\", \"FM\", \"GA\", \"MS\", \"PK\", \"PR\", \"VT\", \"VU\"]\n\n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data\n \n # load specific domain data for youtube-hl dataset\n if self.dset_name == 'youtube_uni':\n target_domain = dset_domain\n assert target_domain in [\"dog\", \"gymnastics\", \"parkour\", \"skating\", \"skiing\", \"surfing\"]\n \n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data \n \n self.use_glove = False\n self.use_glove = 'vgg' in self.v_feat_dirs[0]\n\n if self.dset_name == 'charadesSTA' and self.use_glove:\n self.vocab = vocab.pretrained_aliases['glove.6B.300d']()\n self.vocab.itos.extend(['<unk>'])\n 
self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]\n self.vocab.vectors = torch.cat(\n (self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)\n self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)\n \n\n def load_data(self):\n datalist = load_jsonl(self.data_path)\n if self.data_ratio != 1:\n n_examples = int(len(datalist) * self.data_ratio)\n datalist = datalist[:n_examples]\n logger.info(\"Using {}% of the data: {} examples\"\n .format(self.data_ratio * 100, n_examples))\n return datalist\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n meta = self.data[index]\n\n model_inputs = dict()\n\n if self.use_glove:\n model_inputs[\"query_feat\"] = self.get_query(meta[\"query\"])\n else:\n model_inputs[\"query_feat\"] = self._get_query_feat_by_qid(meta[\"qid\"]) # (Dq, ) or (Lq, Dq)\n \n if self.use_video:\n model_inputs[\"video_feat\"] = self._get_video_feat_by_vid(meta[\"vid\"]) # (Lv, Dv)\n ctx_l = len(model_inputs[\"video_feat\"])\n else:\n ctx_l = self.max_v_l\n\n\n if self.use_tef:\n tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l\n tef_ed = tef_st + 1.0 / ctx_l\n tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)\n if self.use_video:\n model_inputs[\"video_feat\"] = torch.cat(\n [model_inputs[\"video_feat\"], tef], dim=1) # (Lv, Dv+2)\n else:\n model_inputs[\"video_feat\"] = tef\n\n\n if self.dset_name in ['tvsum']:\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_tvsum(meta_label, ctx_l)\n if len(model_inputs[\"saliency_all_labels\"]) != len(model_inputs[\"video_feat\"]):\n model_inputs[\"video_feat\"] = model_inputs[\"video_feat\"][:len(model_inputs[\"saliency_all_labels\"])]\n\n elif self.dset_name == 'youtube_uni':\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_youtube(meta_label, ctx_l)\n else:\n if \"relevant_windows\" in meta: ## For Qvhighlights test set\n model_inputs[\"span_labels\"] = self.get_span_labels(meta[\"relevant_windows\"], ctx_l) # (#windows, 2)\n if self.dset_name in ['charadesSTA', 'tacos', 'activitynet']: ## charades, tacos, nlq\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n elif self.dset_name in ['nlq']:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l, 2) # only one gt\n elif \"subs_train\" not in self.data_path:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all(meta[\"relevant_clip_ids\"], meta[\"saliency_scores\"], ctx_l)\n else:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\n \"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n\n if 'qvhighlight' in self.data_path:\n model_inputs[\"relevant_clip_ids\"] = 
meta[\"relevant_clip_ids\"]\n model_inputs[\"vid\"] = meta[\"vid\"]\n model_inputs[\"qid\"] = meta[\"qid\"]\n return dict(meta=meta, model_inputs=model_inputs)\n\n def get_query(self, query):\n word_inds = torch.LongTensor(\n [self.vocab.stoi.get(w.lower(), 400000) for w in query.split()])\n return self.embedding(word_inds)\n\n def get_saliency_labels_sub_as_query(self, gt_window, duration, ctx_l, max_n=2):\n clip_len = duration / ctx_l\n gt_st = int(gt_window[0] / clip_len)\n gt_ed = max(0, min(int(gt_window[1] / clip_len), ctx_l) - 1)\n if gt_st > gt_ed:\n gt_st = gt_ed\n\n if gt_st != gt_ed:\n pos_clip_indices = random.sample(range(gt_st, gt_ed + 1), k=max_n)\n else:\n if self.dset_name == 'nlq':\n pos_clip_indices = [gt_st] * 2\n else:\n pos_clip_indices = [gt_st, gt_st]\n\n neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))\n try:\n neg_clip_indices = random.sample(neg_pool, k=max_n)\n except:\n neg_clip_indices = pos_clip_indices\n\n # For charades_sta\n score_array = np.zeros(ctx_l)\n score_array[gt_st:gt_ed + 1] = 1\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n\n def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices\n\n def get_saliency_labels_all(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = 
np.argsort(agg_scores) # increasing\n\n # score_array = [min(agg_scores[idx], ctx_l-1) for idx in range(ctx_l)]\n score_array = np.zeros(ctx_l)\n for idx in range(len(rel_clip_ids)):\n if rel_clip_ids[idx] >= ctx_l:\n score_array_new = np.zeros(ctx_l + 1)\n score_array_new[:ctx_l] = score_array\n score_array = score_array_new\n score_array[rel_clip_ids[idx]] = agg_scores[idx]\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_tvsum(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n agg_scores = np.sum(labels - np.ones_like(labels), axis=-1)[:ctx_l] # start from 1, so minus 1\n score_array = agg_scores / 80 * 12\n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_youtube(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n # Youtube-hl only have binary score\n agg_scores = np.array(labels)[:, 0] # (L, 1) --> (L, )\n score_array = agg_scores * 1\n \n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n \n def get_span_labels(self, windows, ctx_l):\n \"\"\"\n windows: list([st, ed]) in seconds. E.g. 
[[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)\n Note a maximum of `self.max_windows` windows are used.\n returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length\n \"\"\"\n if len(windows) > self.max_windows:\n random.shuffle(windows)\n windows = windows[:self.max_windows]\n if self.span_loss_type == \"l1\":\n windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx\n windows = span_xx_to_cxw(windows) # normalized windows in cxw\n elif self.span_loss_type == \"ce\":\n windows = torch.Tensor([\n [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]\n for w in windows]).long() # inclusive\n else:\n raise NotImplementedError\n return windows\n\n def _get_query_feat_by_qid(self, qid):\n if self.dset_name == 'tvsum':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid))) # 'token', 'text'\n return torch.from_numpy(q_feat['token'])\n # youtube-hl\n elif self.dset_name == 'youtube_uni':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid)))\n return torch.from_numpy(q_feat['last_hidden_state'])\n \n elif self.dset_name in ['tacos', 'nlq']:\n q_feat_path = join(self.q_feat_dir, f\"{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n else:\n # QVhighlight dataset\n q_feat_path = join(self.q_feat_dir, f\"qid{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n return torch.from_numpy(q_feat) # (D, ) or (Lq, D)\n\n def random_drop_rows(self, embeddings):\n \"\"\"randomly mask num_drop rows in embeddings to be zero.\n Args:\n embeddings: np.ndarray (L, D)\n \"\"\"\n num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)\n if num_drop_rows > 0:\n row_indices = np.random.choice(\n len(embeddings), size=num_drop_rows, replace=False)\n embeddings[row_indices] = 0\n return embeddings\n\n def _get_video_feat_by_vid(self, vid):\n if self.dset_name == 'tvsum':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n _feat_path = join(_feat_dir, f\"{vid}_rgb.npy\")\n _feat_rgb = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n\n _feat_path = join(_feat_dir, f\"{vid}_opt.npy\")\n _feat_opt = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n _feat = np.concatenate([_feat_rgb, _feat_opt], axis=-1)\n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n elif self.dset_name == 'youtube_uni':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n # Only single npz files per directory\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n 
v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list] # TODO do we need to cut the length over the min_len?\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n else:\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n return torch.from_numpy(v_feat) # (Lv, D)" }, { "identifier": "start_end_collate", "path": "cg_detr/start_end_dataset.py", "snippet": "def start_end_collate(batch):\n batch_meta = [e[\"meta\"] for e in batch] # seems no need to collate ?\n\n model_inputs_keys = batch[0][\"model_inputs\"].keys()\n batched_data = dict()\n for k in model_inputs_keys:\n if k == \"span_labels\":\n batched_data[k] = [dict(spans=e[\"model_inputs\"][\"span_labels\"]) for e in batch]\n continue\n if k in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n batched_data[k] = torch.LongTensor([e[\"model_inputs\"][k] for e in batch])\n continue\n if k == \"saliency_all_labels\":\n pad_data, mask_data = pad_sequences_1d([e[\"model_inputs\"][k] for e in batch], dtype=np.float32, fixed_length=None)\n batched_data[k] = torch.tensor(pad_data, dtype=torch.float32)\n continue\n if k == 'qid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n if k == 'vid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n batched_data[k] = pad_sequences_1d(\n [e[\"model_inputs\"][k] for e in batch], dtype=torch.float32, fixed_length=None)\n return batch_meta, batched_data" }, { "identifier": "prepare_batch_inputs", "path": "cg_detr/start_end_dataset.py", "snippet": "def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):\n model_inputs = dict(\n src_txt=batched_model_inputs[\"query_feat\"][0].to(device, non_blocking=non_blocking),\n src_txt_mask=batched_model_inputs[\"query_feat\"][1].to(device, non_blocking=non_blocking),\n src_vid=batched_model_inputs[\"video_feat\"][0].to(device, non_blocking=non_blocking),\n src_vid_mask=batched_model_inputs[\"video_feat\"][1].to(device, non_blocking=non_blocking),\n vid=batched_model_inputs[\"vid\"],\n qid=batched_model_inputs[\"qid\"],\n )\n targets = {}\n\n if \"span_labels\" in batched_model_inputs:\n targets[\"span_labels\"] = [\n dict(spans=e[\"spans\"].to(device, non_blocking=non_blocking))\n for e in batched_model_inputs[\"span_labels\"]\n ]\n if \"saliency_pos_labels\" in batched_model_inputs:\n for name in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)\n\n if \"saliency_all_labels\" in batched_model_inputs:\n targets[\"saliency_all_labels\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets[\"relevant_clips\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets = None if len(targets) == 0 else targets\n return model_inputs, targets" }, { "identifier": "PostProcessorDETR", "path": 
"cg_detr/postprocessing_cg_detr.py", "snippet": "class PostProcessorDETR:\n def __init__(self, clip_length=2, min_ts_val=0, max_ts_val=150,\n min_w_l=2, max_w_l=70, move_window_method=\"center\",\n process_func_names=(\"clip_window_l\", \"clip_ts\", \"round_multiple\")):\n self.clip_length = clip_length\n self.min_ts_val = min_ts_val\n self.max_ts_val = max_ts_val\n self.min_w_l = min_w_l\n self.max_w_l = max_w_l\n self.move_window_method = move_window_method\n self.process_func_names = process_func_names\n self.name2func = dict(\n clip_ts=self.clip_min_max_timestamps,\n round_multiple=self.round_to_multiple_clip_lengths,\n clip_window_l=self.clip_window_lengths\n )\n\n def __call__(self, lines):\n processed_lines = []\n for line in tqdm(lines, desc=f\"convert to multiples of clip_length={self.clip_length}\"):\n windows_and_scores = torch.tensor(line[\"pred_relevant_windows\"])\n windows = windows_and_scores[:, :2]\n for func_name in self.process_func_names:\n windows = self.name2func[func_name](windows)\n line[\"pred_relevant_windows\"] = torch.cat(\n [windows, windows_and_scores[:, 2:3]], dim=1).tolist()\n line[\"pred_relevant_windows\"] = [e[:2] + [float(f\"{e[2]:.4f}\")] for e in line[\"pred_relevant_windows\"]]\n processed_lines.append(line)\n return processed_lines\n\n def clip_min_max_timestamps(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure timestamps for all windows is within [min_val, max_val], clip is out of boundaries.\n \"\"\"\n return torch.clamp(windows, min=self.min_ts_val, max=self.max_ts_val)\n\n def round_to_multiple_clip_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure the final window timestamps are multiples of `clip_length`\n \"\"\"\n return torch.round(windows / self.clip_length) * self.clip_length\n\n def clip_window_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) np.ndarray\n ensure the final window duration are within [self.min_w_l, self.max_w_l]\n \"\"\"\n window_lengths = windows[:, 1] - windows[:, 0]\n small_rows = window_lengths < self.min_w_l\n if torch.sum(small_rows) > 0:\n windows = self.move_windows(\n windows, small_rows, self.min_w_l, move_method=self.move_window_method)\n large_rows = window_lengths > self.max_w_l\n if torch.sum(large_rows) > 0:\n windows = self.move_windows(\n windows, large_rows, self.max_w_l, move_method=self.move_window_method)\n return windows\n\n @classmethod\n def move_windows(cls, windows, row_selector, new_length, move_method=\"left\"):\n \"\"\"\n Args:\n windows:\n row_selector:\n new_length:\n move_method: str,\n left: keep left unchanged\n center: keep center unchanged\n right: keep right unchanged\n\n Returns:\n\n \"\"\"\n # import ipdb;\n # ipdb.set_trace()\n if move_method == \"left\":\n windows[row_selector, 1] = windows[row_selector, 0] + new_length\n elif move_method == \"right\":\n windows[row_selector, 0] = windows[row_selector, 1] - new_length\n elif move_method == \"center\":\n center = (windows[row_selector, 1] + windows[row_selector, 0]) / 2.\n windows[row_selector, 0] = center - new_length / 2.\n windows[row_selector, 1] = center + new_length / 2.\n return windows" }, { "identifier": "eval_submission", "path": "standalone_eval/eval.py", "snippet": "def eval_submission(submission, ground_truth, verbose=True, match_number=True):\n \"\"\"\n Args:\n submission: list(dict), each dict is {\n qid: str,\n query: str,\n vid: str,\n pred_relevant_windows: list([st, ed]),\n pred_saliency_scores: list(float), len == #clips in video.\n i.e., each clip in the 
video will have a saliency score.\n }\n ground_truth: list(dict), each dict is {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17]\n \"saliency_scores\": [[4, 4, 2], [3, 4, 2], [2, 2, 3], [2, 2, 2], [0, 1, 3]]\n each sublist corresponds to one clip in relevant_clip_ids.\n The 3 elements in the sublist are scores from 3 different workers. The\n scores are in [0, 1, 2, 3, 4], meaning [Very Bad, ..., Good, Very Good]\n }\n verbose:\n match_number:\n\n Returns:\n\n \"\"\"\n pred_qids = set([e[\"qid\"] for e in submission])\n gt_qids = set([e[\"qid\"] for e in ground_truth])\n if match_number:\n assert pred_qids == gt_qids, \\\n f\"qids in ground_truth and submission must match. \" \\\n f\"use `match_number=False` if you wish to disable this check\"\n else: # only leave the items that exists in both submission and ground_truth\n shared_qids = pred_qids.intersection(gt_qids)\n submission = [e for e in submission if e[\"qid\"] in shared_qids]\n ground_truth = [e for e in ground_truth if e[\"qid\"] in shared_qids]\n\n eval_metrics = {}\n eval_metrics_brief = OrderedDict()\n if \"pred_relevant_windows\" in submission[0]:\n moment_ret_scores = eval_moment_retrieval(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(moment_ret_scores)\n moment_ret_scores_brief = {\n \"MR-full-mAP\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"average\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.75\"],\n \"MR-short-mAP\": moment_ret_scores[\"short\"][\"MR-mAP\"][\"average\"],\n \"MR-middle-mAP\": moment_ret_scores[\"middle\"][\"MR-mAP\"][\"average\"],\n \"MR-long-mAP\": moment_ret_scores[\"long\"][\"MR-mAP\"][\"average\"],\n \"MR-full-mIoU\": moment_ret_scores[\"full\"][\"MR-mIoU\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.3\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.7\"],\n }\n eval_metrics_brief.update(\n sorted([(k, v) for k, v in moment_ret_scores_brief.items()], key=lambda x: x[0]))\n\n if \"pred_saliency_scores\" in submission[0]:\n highlight_det_scores = eval_highlight(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(highlight_det_scores)\n highlight_det_scores_brief = dict([\n (f\"{k}-{sub_k.split('-')[1]}\", v[sub_k])\n for k, v in highlight_det_scores.items() for sub_k in v])\n eval_metrics_brief.update(highlight_det_scores_brief)\n\n # sort by keys\n final_eval_metrics = OrderedDict()\n final_eval_metrics[\"brief\"] = eval_metrics_brief\n final_eval_metrics.update(sorted([(k, v) for k, v in eval_metrics.items()], key=lambda x: x[0]))\n return final_eval_metrics" }, { "identifier": "save_jsonl", "path": "utils/basic_utils.py", "snippet": "def save_jsonl(data, filename):\n \"\"\"data is a list\"\"\"\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join([json.dumps(e) for e in data]))" }, { "identifier": "save_json", "path": "utils/basic_utils.py", "snippet": "def save_json(data, filename, save_pretty=False, sort_keys=False):\n with open(filename, \"w\") as f:\n if save_pretty:\n f.write(json.dumps(data, indent=4, sort_keys=sort_keys))\n else:\n json.dump(data, f)" }, { "identifier": "temporal_nms", "path": "utils/temporal_nms.py", "snippet": "def temporal_nms(predictions, nms_thd, max_after_nms=100):\n 
\"\"\"\n Args:\n predictions: list(sublist), each sublist is [st (float), ed(float), score (float)],\n note larger scores are better and are preserved. For metrics that are better when smaller,\n please convert to its negative, e.g., convert distance to negative distance.\n nms_thd: float in [0, 1]\n max_after_nms:\n Returns:\n predictions_after_nms: list(sublist), each sublist is [st (float), ed(float), score (float)]\n References:\n https://github.com/wzmsltw/BSN-boundary-sensitive-network/blob/7b101fc5978802aa3c95ba5779eb54151c6173c6/Post_processing.py#L42\n \"\"\"\n if len(predictions) == 1: # only has one prediction, no need for nms\n return predictions\n\n predictions = sorted(predictions, key=lambda x: x[2], reverse=True) # descending order\n\n tstart = [e[0] for e in predictions]\n tend = [e[1] for e in predictions]\n tscore = [e[2] for e in predictions]\n rstart = []\n rend = []\n rscore = []\n while len(tstart) > 1 and len(rscore) < max_after_nms: # max 100 after nms\n idx = 1\n while idx < len(tstart): # compare with every prediction in the list.\n if compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]) > nms_thd:\n # rm highly overlapped lower score entries.\n tstart.pop(idx)\n tend.pop(idx)\n tscore.pop(idx)\n # print(\"--------------------------------\")\n # print(compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]))\n # print([tstart[0], tend[0]], [tstart[idx], tend[idx]])\n # print(tstart.pop(idx), tend.pop(idx), tscore.pop(idx))\n else:\n # move to next\n idx += 1\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n if len(rscore) < max_after_nms and len(tstart) >= 1: # add the last, possibly empty.\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n predictions_after_nms = [[st, ed, s] for s, st, ed in zip(rscore, rstart, rend)]\n return predictions_after_nms" } ]
import pprint
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import logging
from tqdm import tqdm, trange
from collections import OrderedDict, defaultdict
from utils.basic_utils import AverageMeter
from torch.utils.data import DataLoader
from cg_detr.config import TestOptions
from cg_detr.model import build_model
from cg_detr.span_utils import span_cxw_to_xx
from cg_detr.start_end_dataset import StartEndDataset, start_end_collate, prepare_batch_inputs
from cg_detr.postprocessing_cg_detr import PostProcessorDETR
from standalone_eval.eval import eval_submission
from utils.basic_utils import save_jsonl, save_json
from utils.temporal_nms import temporal_nms
from collections import OrderedDict
from sys import argv
13,511
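The temporal_nms helper in the context above keeps the highest-scoring window and suppresses any remaining window whose temporal IoU with it exceeds nms_thd. A compact sketch of the same greedy idea, assuming the usual 1-D interval IoU (compute_temporal_iou itself is not shown in this record; interval_iou and simple_temporal_nms are hypothetical names):

def interval_iou(a, b):
    # a, b: [start, end]; overlap divided by the spanned extent of the two segments
    inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
    union = max(a[1], b[1]) - min(a[0], b[0])
    return inter / union if union > 0 else 0.0

def simple_temporal_nms(preds, nms_thd=0.7):
    # preds: list of [start, end, score]; larger score is better
    preds = sorted(preds, key=lambda x: x[2], reverse=True)
    kept = []
    for p in preds:
        if all(interval_iou(p, k) <= nms_thd for k in kept):
            kept.append(p)
    return kept

print(simple_temporal_nms([[0, 10, 0.9], [1, 11, 0.8], [20, 30, 0.7]]))
# -> [[0, 10, 0.9], [20, 30, 0.7]]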
min_w_l=12, max_w_l=360, move_window_method="left", process_func_names=("clip_ts", "round_multiple") ) else: post_processor = PostProcessorDETR( clip_length=opt.clip_length, min_ts_val=0, max_ts_val=150, min_w_l=2, max_w_l=60, move_window_method="left", process_func_names=("clip_ts", "round_multiple") ) else: post_processor = PostProcessorDETR( clip_length=opt.clip_length, min_ts_val=0, max_ts_val=50000, min_w_l=0, max_w_l=50000, move_window_method="left", process_func_names=(["round_multiple"]) ) mr_res = post_processor(mr_res) return mr_res, loss_meters def get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer): """compute and save query and video proposal embeddings""" eval_res, eval_loss_meters = compute_mr_results(model, eval_loader, opt, epoch_i, criterion, tb_writer) # list(dict) return eval_res, eval_loss_meters def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None): logger.info("Generate submissions") model.eval() if criterion is not None and eval_dataset.load_labels: criterion.eval() else: criterion = None if opt.dset_name == 'tacos': shuffle = True else: shuffle = False eval_loader = DataLoader( eval_dataset, collate_fn=start_end_collate, batch_size=opt.eval_bsz, num_workers=opt.num_workers, shuffle=shuffle, pin_memory=opt.pin_memory ) # tvsum if opt.dset_name in ['tvsum', 'youtube_uni']: metrics, eval_loss_meters = compute_hl_results(model, eval_loader, opt, epoch_i, criterion, tb_writer) # to match original save format submission = [ {"brief": metrics} ] submission_path = os.path.join(opt.results_dir, "latest_metric.jsonl") save_jsonl(submission, submission_path) return submission[0], submission[0], eval_loss_meters, [submission_path] else: submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer) if opt.dset_name in ['charadesSTA', 'tacos', 'nlq']: new_submission = [] for s in submission: s.pop('pred_saliency_scores', None) new_submission.append(s) submission = new_submission if opt.no_sort_results: save_submission_filename = save_submission_filename.replace(".jsonl", "_unsorted.jsonl") metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing( submission, opt, eval_dataset.data, save_submission_filename) return metrics, metrics_nms, eval_loss_meters, latest_file_paths def setup_model(opt): """setup model/optimizer/scheduler and load checkpoints when needed""" logger.info("setup model/optimizer/scheduler") model, criterion = build_model(opt) if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) criterion.to(opt.device) param_dicts = [{"params": [p for n, p in model.named_parameters() if p.requires_grad]}] optimizer = torch.optim.AdamW(param_dicts, lr=opt.lr, weight_decay=opt.wd) lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_drop) if opt.resume is not None: logger.info(f"Load checkpoint from {opt.resume}") checkpoint = torch.load(opt.resume, map_location="cpu") new_state_dict = OrderedDict() if 'pt' in opt.resume[:-4]: if 'asr' in opt.resume[:25]: model.load_state_dict(checkpoint["model"]) else: for k, v in checkpoint["model"].items(): name = k[7:] # remove `module.` new_state_dict[name] = v # model.load_state_dict(checkpoint["model"]) model.load_state_dict(new_state_dict) else: model.load_state_dict(checkpoint["model"]) if opt.resume_all: optimizer.load_state_dict(checkpoint['optimizer']) lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) opt.start_epoch = checkpoint['epoch'] + 1 logger.info(f"Loaded 
model saved at epoch {checkpoint['epoch']} from checkpoint: {opt.resume}") else: logger.warning("If you intend to evaluate the model, please specify --resume with ckpt path") return model, criterion, optimizer, lr_scheduler def start_inference(train_opt=None, split=None, splitfile=None): if train_opt is not None:
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): mr_res_after_nms = [] for e in mr_res: e["pred_relevant_windows"] = temporal_nms( e["pred_relevant_windows"][:max_before_nms], nms_thd=nms_thd, max_after_nms=max_after_nms ) mr_res_after_nms.append(e) return mr_res_after_nms def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): # IOU_THDS = (0.5, 0.7) logger.info("Saving/Evaluating before nms results") submission_path = os.path.join(opt.results_dir, save_submission_filename) save_jsonl(submission, submission_path) if opt.eval_split_name in ["val"]: # since test_public has no GT metrics = eval_submission( submission, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_path = submission_path.replace(".jsonl", "_metrics.json") save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False) latest_file_paths = [submission_path, save_metrics_path] else: metrics = None latest_file_paths = [submission_path, ] if opt.nms_thd != -1: logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd)) submission_after_nms = post_processing_mr_nms( submission, nms_thd=opt.nms_thd, max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms ) logger.info("Saving/Evaluating nms results") submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd)) save_jsonl(submission_after_nms, submission_nms_path) if opt.eval_split_name == "val": metrics_nms = eval_submission( submission_after_nms, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json") save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False) latest_file_paths += [submission_nms_path, save_metrics_nms_path] else: metrics_nms = None latest_file_paths = [submission_nms_path, ] else: metrics_nms = None return metrics, metrics_nms, latest_file_paths # for HL @torch.no_grad() def compute_hl_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None): model.eval() if criterion: assert eval_loader.dataset.load_labels criterion.eval() loss_meters = defaultdict(AverageMeter) write_tb = tb_writer is not None and epoch_i is not None mr_res = [] topk = 5 # top-5 map video_ap_collected = [] for batch in tqdm(eval_loader, desc="compute st ed scores"): query_meta = batch[0] model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory) outputs = model(**model_inputs) # loss meters # if criterion: # loss_dict = criterion(outputs, targets) # weight_dict = criterion.weight_dict # print(loss_dict) # print(weight_dict) # print('#######') # {'loss_saliency': tensor(18.1374, device='cuda:0')} # {'loss_span': 10, 'loss_giou': 1, 'loss_label': 4, 'loss_saliency': 1.0, 'loss_ms_align': 1.0, # 'loss_distill': 1.0, 'loss_span_0': 10, 'loss_giou_0': 1, 'loss_label_0': 4, 'loss_ms_align_0': 1.0, # 'loss_distill_0': 1.0} # losses=0. 
# print(loss_dict.keys(), weight_dict.keys()) # losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) # loss_dict["loss_overall"] = float(losses) # for logging only # print(loss_dict.items()) # # print(weight_dict.items()) # for k, v in loss_dict.items(): # loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) preds = outputs['saliency_scores'].clone().detach() for meta, pred in zip(query_meta, preds): pred = pred label = meta['label'] # raw label video_ap = [] # Follow the UMT code "https://github.com/TencentARC/UMT/blob/main/datasets/tvsum.py" if opt.dset_name in ["tvsum"]: for i in range(20): pred=pred.cpu() cur_pred = pred[:len(label)] inds = torch.argsort(cur_pred, descending=True, dim=-1) # video_id = self.get_video_id(idx) cur_label = torch.Tensor(label)[:, i] cur_label = torch.where(cur_label > cur_label.median(), 1.0, .0) cur_label = cur_label[inds].tolist()[:topk] # if (num_gt := sum(cur_label)) == 0: num_gt = sum(cur_label) if num_gt == 0: video_ap.append(0) continue hits = ap = rec = 0 prc = 1 for j, gt in enumerate(cur_label): hits += gt _rec = hits / num_gt _prc = hits / (j + 1) ap += (_rec - rec) * (prc + _prc) / 2 rec, prc = _rec, _prc video_ap.append(ap) elif opt.dset_name in ["youtube_uni"]: cur_pred = pred[:len(label)] # if opt.dset_name == "tvsum_sfc": cur_pred = cur_pred.cpu() inds = torch.argsort(cur_pred, descending=True, dim=-1) cur_label = torch.Tensor(label).squeeze()[inds].tolist() num_gt = sum(cur_label) if num_gt == 0: video_ap.append(0) continue hits = ap = rec = 0 prc = 1 for j, gt in enumerate(cur_label): hits += gt _rec = hits / num_gt _prc = hits / (j + 1) ap += (_rec - rec) * (prc + _prc) / 2 rec, prc = _rec, _prc video_ap.append(float(ap)) else: print("No such dataset") exit(-1) video_ap_collected.append(video_ap) mean_ap = np.mean(video_ap_collected) submmission = dict(mAP=round(mean_ap, 5)) # tensorboard writer if write_tb and criterion: for k, v in loss_meters.items(): tb_writer.add_scalar("Eval/{}".format(k), v.avg, epoch_i + 1) return submmission, loss_meters @torch.no_grad() def compute_mr_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None): model.eval() if criterion: assert eval_loader.dataset.load_labels criterion.eval() loss_meters = defaultdict(AverageMeter) write_tb = tb_writer is not None and epoch_i is not None mr_res = [] for batch in tqdm(eval_loader, desc="compute st ed scores"): query_meta = batch[0] model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory) outputs = model(**model_inputs) prob = F.softmax(outputs["pred_logits"], -1) # (batch_size, #queries, #classes=2) if opt.span_loss_type == "l1": scores = prob[..., 0] # * (batch_size, #queries) foreground label is 0, we directly take it pred_spans = outputs["pred_spans"] # (bsz, #queries, 2) _saliency_scores = outputs["saliency_scores"].half() # (bsz, L) saliency_scores = [] valid_vid_lengths = model_inputs["src_vid_mask"].sum(1).cpu().tolist() for j in range(len(valid_vid_lengths)): saliency_scores.append(_saliency_scores[j, :int(valid_vid_lengths[j])].tolist()) else: bsz, n_queries = outputs["pred_spans"].shape[:2] # # (bsz, #queries, max_v_l *2) pred_spans_logits = outputs["pred_spans"].view(bsz, n_queries, 2, opt.max_v_l) pred_span_scores, pred_spans = F.softmax(pred_spans_logits, dim=-1).max(-1) # 2 * (bsz, #queries, 2) scores = torch.prod(pred_span_scores, 2) # (bsz, #queries) pred_spans[:, 1] += 1 pred_spans *= opt.clip_length # compose predictions 
for idx, (meta, spans, score) in enumerate(zip(query_meta, pred_spans.cpu(), scores.cpu())): if opt.span_loss_type == "l1": spans = span_cxw_to_xx(spans) * meta["duration"] spans = torch.clamp(spans, 0, meta["duration"]) # # (#queries, 3), [st(float), ed(float), score(float)] cur_ranked_preds = torch.cat([spans, score[:, None]], dim=1).tolist() if not opt.no_sort_results: cur_ranked_preds = sorted(cur_ranked_preds, key=lambda x: x[2], reverse=True) cur_ranked_preds = [[float(f"{e:.4f}") for e in row] for row in cur_ranked_preds] cur_query_pred = dict( qid=meta["qid"], query=meta["query"], vid=meta["vid"], pred_relevant_windows=cur_ranked_preds, pred_saliency_scores=saliency_scores[idx] ) mr_res.append(cur_query_pred) if criterion: loss_dict = criterion(outputs, targets) weight_dict = criterion.weight_dict losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) loss_dict["loss_overall"] = float(losses) # for logging only for k, v in loss_dict.items(): loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) if opt.debug: break if write_tb and criterion: for k, v in loss_meters.items(): tb_writer.add_scalar("Eval/{}".format(k), v.avg, epoch_i + 1) if opt.dset_name in ['hl']: post_processor = PostProcessorDETR( clip_length=opt.clip_length, min_ts_val=0, max_ts_val=150, min_w_l=2, max_w_l=150, move_window_method="left", process_func_names=("clip_ts", "round_multiple") ) elif opt.dset_name in ['charadesSTA']: if opt.v_feat_dim == 4096: # vgg post_processor = PostProcessorDETR( clip_length=opt.clip_length, min_ts_val=0, max_ts_val=360, min_w_l=12, max_w_l=360, move_window_method="left", process_func_names=("clip_ts", "round_multiple") ) else: post_processor = PostProcessorDETR( clip_length=opt.clip_length, min_ts_val=0, max_ts_val=150, min_w_l=2, max_w_l=60, move_window_method="left", process_func_names=("clip_ts", "round_multiple") ) else: post_processor = PostProcessorDETR( clip_length=opt.clip_length, min_ts_val=0, max_ts_val=50000, min_w_l=0, max_w_l=50000, move_window_method="left", process_func_names=(["round_multiple"]) ) mr_res = post_processor(mr_res) return mr_res, loss_meters def get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer): """compute and save query and video proposal embeddings""" eval_res, eval_loss_meters = compute_mr_results(model, eval_loader, opt, epoch_i, criterion, tb_writer) # list(dict) return eval_res, eval_loss_meters def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None): logger.info("Generate submissions") model.eval() if criterion is not None and eval_dataset.load_labels: criterion.eval() else: criterion = None if opt.dset_name == 'tacos': shuffle = True else: shuffle = False eval_loader = DataLoader( eval_dataset, collate_fn=start_end_collate, batch_size=opt.eval_bsz, num_workers=opt.num_workers, shuffle=shuffle, pin_memory=opt.pin_memory ) # tvsum if opt.dset_name in ['tvsum', 'youtube_uni']: metrics, eval_loss_meters = compute_hl_results(model, eval_loader, opt, epoch_i, criterion, tb_writer) # to match original save format submission = [ {"brief": metrics} ] submission_path = os.path.join(opt.results_dir, "latest_metric.jsonl") save_jsonl(submission, submission_path) return submission[0], submission[0], eval_loss_meters, [submission_path] else: submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer) if opt.dset_name in ['charadesSTA', 'tacos', 'nlq']: new_submission = [] for s in 
submission: s.pop('pred_saliency_scores', None) new_submission.append(s) submission = new_submission if opt.no_sort_results: save_submission_filename = save_submission_filename.replace(".jsonl", "_unsorted.jsonl") metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing( submission, opt, eval_dataset.data, save_submission_filename) return metrics, metrics_nms, eval_loss_meters, latest_file_paths def setup_model(opt): """setup model/optimizer/scheduler and load checkpoints when needed""" logger.info("setup model/optimizer/scheduler") model, criterion = build_model(opt) if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) criterion.to(opt.device) param_dicts = [{"params": [p for n, p in model.named_parameters() if p.requires_grad]}] optimizer = torch.optim.AdamW(param_dicts, lr=opt.lr, weight_decay=opt.wd) lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_drop) if opt.resume is not None: logger.info(f"Load checkpoint from {opt.resume}") checkpoint = torch.load(opt.resume, map_location="cpu") new_state_dict = OrderedDict() if 'pt' in opt.resume[:-4]: if 'asr' in opt.resume[:25]: model.load_state_dict(checkpoint["model"]) else: for k, v in checkpoint["model"].items(): name = k[7:] # remove `module.` new_state_dict[name] = v # model.load_state_dict(checkpoint["model"]) model.load_state_dict(new_state_dict) else: model.load_state_dict(checkpoint["model"]) if opt.resume_all: optimizer.load_state_dict(checkpoint['optimizer']) lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) opt.start_epoch = checkpoint['epoch'] + 1 logger.info(f"Loaded model saved at epoch {checkpoint['epoch']} from checkpoint: {opt.resume}") else: logger.warning("If you intend to evaluate the model, please specify --resume with ckpt path") return model, criterion, optimizer, lr_scheduler def start_inference(train_opt=None, split=None, splitfile=None): if train_opt is not None:
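In compute_mr_results above (the l1 span-loss branch), the model's spans come out in normalized (center, width) form and are converted back to second-level (start, end) windows, clamped to the video duration, and concatenated with the query scores. A minimal sketch, assuming span_cxw_to_xx (not shown in this record) performs the usual center/width-to-start/end mapping; cxw_to_xx below is a hypothetical local stand-in:

import torch

def cxw_to_xx(spans_cxw):
    # (..., 2) normalized (center, width) -> (start, end); assumed behaviour of span_cxw_to_xx
    x1 = spans_cxw[..., 0] - 0.5 * spans_cxw[..., 1]
    x2 = spans_cxw[..., 0] + 0.5 * spans_cxw[..., 1]
    return torch.stack([x1, x2], dim=-1)

duration = 150.0                                  # seconds, from the query meta
spans = torch.tensor([[0.2, 0.1], [0.95, 0.2]])   # (#queries, 2) as (cx, w) in [0, 1]
scores = torch.tensor([0.8, 0.3])

windows = torch.clamp(cxw_to_xx(spans) * duration, 0, duration)
ranked = torch.cat([windows, scores[:, None]], dim=1)   # (#queries, 3): [st, ed, score]
print(ranked)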
opt = TestOptions().parse(train_opt.a_feat_dir)
1
2023-11-10 12:45:25+00:00
16k
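For the TVSum branch of compute_hl_results above, each annotator's raw clip scores are binarized at that annotator's median, clips are ranked by predicted saliency, and average precision is accumulated over the top-k ranked clips. A standalone sketch of that AP accumulation on toy labels (topk_ap is a hypothetical helper name; the arithmetic mirrors the loop in the code above):

def topk_ap(ranked_binary_labels, num_gt):
    # ranked_binary_labels: 0/1 relevance of the top-k clips, already sorted by predicted score
    hits = ap = rec = 0.0
    prc = 1.0
    for j, gt in enumerate(ranked_binary_labels):
        hits += gt
        _rec = hits / num_gt
        _prc = hits / (j + 1)
        ap += (_rec - rec) * (prc + _prc) / 2   # trapezoidal area under the precision-recall curve
        rec, prc = _rec, _prc
    return ap

print(topk_ap([1, 0, 1, 0, 0], num_gt=2))   # ~0.7917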
dazhangyu123/ACMIL
Step1_create_patches_fp.py
[ { "identifier": "WholeSlideImage", "path": "wsi_core/WholeSlideImage.py", "snippet": "class WholeSlideImage(object):\n def __init__(self, path):\n\n \"\"\"\n Args:\n path (str): fullpath to WSI file\n \"\"\"\n\n# self.name = \".\".join(path.split(\"/\")[-1].split('.')[:-1])\n self.name = os.path.splitext(os.path.basename(path))[0]\n # pdb.set_trace()\n try:\n self.wsi = openslide.open_slide(path)\n except:\n self.wsi = kfbslide.open_kfbslide(path)\n # self.wsi = openSlide(path)\n # pdb.set_trace()\n self.level_downsamples = self._assertLevelDownsamples()\n self.level_dim = self.wsi.level_dimensions\n\n self.contours_tissue = None\n self.contours_tumor = None\n self.hdf5_file = None\n\n def getOpenSlide(self):\n return self.wsi\n\n def initXML(self, xml_path):\n def _createContour(coord_list):\n return np.array([[[int(float(coord.attributes['X'].value)), \n int(float(coord.attributes['Y'].value))]] for coord in coord_list], dtype = 'int32')\n\n xmldoc = minidom.parse(xml_path)\n annotations = [anno.getElementsByTagName('Coordinate') for anno in xmldoc.getElementsByTagName('Annotation')]\n self.contours_tumor = [_createContour(coord_list) for coord_list in annotations]\n self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)\n\n def initTxt(self,annot_path):\n def _create_contours_from_dict(annot):\n all_cnts = []\n for idx, annot_group in enumerate(annot):\n contour_group = annot_group['coordinates']\n if annot_group['type'] == 'Polygon':\n for idx, contour in enumerate(contour_group):\n contour = np.array(contour).astype(np.int32).reshape(-1,1,2)\n all_cnts.append(contour) \n\n else:\n for idx, sgmt_group in enumerate(contour_group):\n contour = []\n for sgmt in sgmt_group:\n contour.extend(sgmt)\n contour = np.array(contour).astype(np.int32).reshape(-1,1,2) \n all_cnts.append(contour) \n\n return all_cnts\n \n with open(annot_path, \"r\") as f:\n annot = f.read()\n annot = eval(annot)\n self.contours_tumor = _create_contours_from_dict(annot)\n self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)\n\n def initSegmentation(self, mask_file):\n # load segmentation results from pickle file\n import pickle\n asset_dict = load_pkl(mask_file)\n self.holes_tissue = asset_dict['holes']\n self.contours_tissue = asset_dict['tissue']\n\n def saveSegmentation(self, mask_file):\n # save segmentation results using pickle\n asset_dict = {'holes': self.holes_tissue, 'tissue': self.contours_tissue}\n save_pkl(mask_file, asset_dict)\n\n def segmentTissue(self, seg_level=0, sthresh=20, sthresh_up = 255, mthresh=7, close = 0, use_otsu=False, \n filter_params={'a_t':100}, ref_patch_size=512, exclude_ids=[], keep_ids=[]):\n \"\"\"\n Segment the tissue via HSV -> Median thresholding -> Binary threshold\n \"\"\"\n \n def _filter_contours(contours, hierarchy, filter_params):\n \"\"\"\n Filter contours by: area.\n \"\"\"\n filtered = []\n\n # find indices of foreground contours (parent == -1)\n hierarchy_1 = np.flatnonzero(hierarchy[:,1] == -1)\n all_holes = []\n \n # loop through foreground contour indices\n for cont_idx in hierarchy_1:\n # actual contour\n # pdb.set_trace()\n\n cont = contours[cont_idx]\n # indices of holes contained in this contour (children of parent contour)\n holes = np.flatnonzero(hierarchy[:, 1] == cont_idx)\n # take contour area (includes holes)\n a = cv2.contourArea(cont)\n # calculate the contour area of each hole\n hole_areas = [cv2.contourArea(contours[hole_idx]) for hole_idx in holes]\n # actual area of foreground contour 
region\n a = a - np.array(hole_areas).sum()\n if a == 0: continue\n # print(tuple((filter_params['a_t'],)),tuple((a,)))\n if tuple((filter_params['a_t'],)) < tuple((a,)): \n filtered.append(cont_idx)\n all_holes.append(holes)\n\n\n foreground_contours = [contours[cont_idx] for cont_idx in filtered]\n \n hole_contours = []\n\n for hole_ids in all_holes:\n unfiltered_holes = [contours[idx] for idx in hole_ids ]\n unfilered_holes = sorted(unfiltered_holes, key=cv2.contourArea, reverse=True)\n # take max_n_holes largest holes by area\n unfilered_holes = unfilered_holes[:filter_params['max_n_holes']]\n filtered_holes = []\n \n # filter these holes\n for hole in unfilered_holes:\n if cv2.contourArea(hole) > filter_params['a_h']:\n filtered_holes.append(hole)\n\n hole_contours.append(filtered_holes)\n\n return foreground_contours, hole_contours\n # pdb.set_trace()\n try:\n img = np.array(self.wsi.read_region((0,0), seg_level, self.level_dim[seg_level]))\n except:\n print('failed read region')\n img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) # Convert to HSV space\n img_med = cv2.medianBlur(img_hsv[:,:,1], mthresh) # Apply median blurring\n \n \n # Thresholding\n # if use_otsu:\n if False:\n otsu_thresh, img_otsu = cv2.threshold(img_med, 0, sthresh_up, cv2.THRESH_OTSU+cv2.THRESH_BINARY)\n # adjust_thresh = max(sthresh,otsu_thresh-20)\n adjust_thresh = otsu_thresh\n _, img_otsu = cv2.threshold(img_med, adjust_thresh, sthresh_up, cv2.THRESH_BINARY)\n print('otsu_threshold:',otsu_thresh,'adjust_thresh:',adjust_thresh)\n else:\n print('not otsu')\n _, img_otsu = cv2.threshold(img_med, sthresh, sthresh_up, cv2.THRESH_BINARY)\n # pdb.set_trace()\n ## hed operas\n # img_hed = rgb2hed(cv2.cvtColor(img, cv2.COLOR_RGBA2RGB))\n # # img_e = hed2rgb(np.stack((img_hed[:, :, 1], img_hed[:, :, 1], img_hed[:, :, 1]), axis=-1))\n # img_h = hed2rgb(np.stack((img_hed[:, :, 0], np.zeros_like(img_hed[:, :, 0]), np.zeros_like(img_hed[:, :, 0])), axis=-1))\n # img_h = (img_h*255).astype(np.uint8)\n # img_h_gray = 255-cv2.medianBlur(cv2.cvtColor(img_h, cv2.COLOR_BGR2GRAY),mthresh)\n # # _, img_otsu = cv2.threshold(img_h_gray, sthresh, sthresh_up, cv2.THRESH_BINARY)\n # otsu_thresh, img_otsu = cv2.threshold(img_h_gray, 0, sthresh_up, cv2.THRESH_OTSU + cv2.THRESH_BINARY)\n # adjust_thresh = max(sthresh,otsu_thresh-20)\n # _, img_otsu = cv2.threshold(img_h_gray, adjust_thresh, sthresh_up, cv2.THRESH_BINARY)\n\n # img_d = hed2rgb(np.stack((img_hed[:, :, 2], img_hed[:, :, 2], img_hed[:, :, 2]), axis=-1))\n # filter this?\n # Morphological closing\n if close > 0:\n kernel = np.ones((close, close), np.uint8)\n img_otsu = cv2.morphologyEx(img_otsu, cv2.MORPH_CLOSE, kernel) \n\n scale = self.level_downsamples[seg_level]\n scaled_ref_patch_area = int(ref_patch_size**2 / (scale[0] * scale[1]))\n print('scaled_ref_patch_area',scaled_ref_patch_area)\n print('ref_patch_size',ref_patch_size)\n print('scale',scale,'seg_level',seg_level)\n\n filter_params = filter_params.copy()\n filter_params['a_t'] = filter_params['a_t'] * scaled_ref_patch_area\n filter_params['a_h'] = filter_params['a_h'] * scaled_ref_patch_area\n \n # Find and filter contours\n contours, hierarchy = cv2.findContours(img_otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # Find contours \n hierarchy = np.squeeze(hierarchy, axis=(0,))[:, 2:]\n # pdb.set_trace()\n if filter_params: foreground_contours, hole_contours = _filter_contours(contours, hierarchy, filter_params) # Necessary for filtering out artifacts\n\n self.contours_tissue = self.scaleContourDim(foreground_contours, 
scale)\n self.holes_tissue = self.scaleHolesDim(hole_contours, scale)\n\n #exclude_ids = [0,7,9]\n if len(keep_ids) > 0:\n contour_ids = set(keep_ids) - set(exclude_ids)\n else:\n contour_ids = set(np.arange(len(self.contours_tissue))) - set(exclude_ids)\n\n self.contours_tissue = [self.contours_tissue[i] for i in contour_ids]\n self.holes_tissue = [self.holes_tissue[i] for i in contour_ids]\n\n def visWSI(self, vis_level=0, color = (0,255,0), hole_color = (0,0,255), annot_color=(255,0,0), \n line_thickness=250, max_size=None, top_left=None, bot_right=None, custom_downsample=1, view_slide_only=False,\n number_contours=False, seg_display=True, annot_display=True):\n \n downsample = self.level_downsamples[vis_level]\n scale = [1/downsample[0], 1/downsample[1]]\n # pdb.set_trace()\n if top_left is not None and bot_right is not None:\n top_left = tuple(top_left)\n bot_right = tuple(bot_right)\n w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int))\n region_size = (w, h)\n else:\n top_left = (0,0)\n region_size = self.level_dim[vis_level]\n img = self.wsi.read_region(top_left, vis_level, region_size)\n try:\n img = np.array(img.convert(\"RGB\"))\n except:\n pass\n\n # view_slide_only= True\n if not view_slide_only:\n offset = tuple(-(np.array(top_left) * scale).astype(int))\n line_thickness = int(line_thickness * math.sqrt(scale[0] * scale[1]))\n if self.contours_tissue is not None and seg_display:\n if not number_contours:\n cv2.drawContours(img, self.scaleContourDim(self.contours_tissue, scale), \n -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset)\n\n else: # add numbering to each contour\n for idx, cont in enumerate(self.contours_tissue):\n contour = np.array(self.scaleContourDim(cont, scale))\n M = cv2.moments(contour)\n cX = int(M[\"m10\"] / (M[\"m00\"] + 1e-9))\n cY = int(M[\"m01\"] / (M[\"m00\"] + 1e-9))\n # draw the contour and put text next to center\n cv2.drawContours(img, [contour], -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset)\n cv2.putText(img, \"{}\".format(idx), (cX, cY),\n cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 10)\n\n for holes in self.holes_tissue:\n cv2.drawContours(img, self.scaleContourDim(holes, scale), \n -1, hole_color, line_thickness, lineType=cv2.LINE_8)\n \n if self.contours_tumor is not None and annot_display:\n cv2.drawContours(img, self.scaleContourDim(self.contours_tumor, scale), \n -1, annot_color, line_thickness, lineType=cv2.LINE_8, offset=offset)\n \n img = Image.fromarray(img)\n \n w, h = img.size\n if custom_downsample > 1:\n img = img.resize((int(w/custom_downsample), int(h/custom_downsample)))\n\n if max_size is not None and (w > max_size or h > max_size):\n resizeFactor = max_size/w if w > h else max_size/h\n img = img.resize((int(w*resizeFactor), int(h*resizeFactor)))\n \n return img\n\n\n def createPatches_bag_hdf5(self, save_path, patch_level=0, patch_size=256, step_size=256, save_coord=True, **kwargs):\n contours = self.contours_tissue\n contour_holes = self.holes_tissue\n\n print(\"Creating patches for: \", self.name, \"...\",)\n elapsed = time.time()\n for idx, cont in enumerate(contours):\n patch_gen = self._getPatchGenerator(cont, idx, patch_level, save_path, patch_size, step_size, **kwargs)\n \n if self.hdf5_file is None:\n try:\n first_patch = next(patch_gen)\n\n # empty contour, continue\n except StopIteration:\n continue\n\n file_path = initialize_hdf5_bag(first_patch, save_coord=save_coord)\n self.hdf5_file = file_path\n\n for patch in patch_gen:\n 
savePatchIter_bag_hdf5(patch)\n\n return self.hdf5_file\n\n def createTopkPatches_bag_hdf5(self, save_path, target_coords, patch_level=1, patch_size=256, step_size=256, save_coord=True,\n **kwargs):\n print(\"Creating patches for: \", self.name, \"...\", )\n topk_list = []\n for idx, coord in enumerate(target_coords):\n x, y = coord\n patch_PIL = self.wsi.read_region((x, y), patch_level, (patch_size, patch_size)).convert('RGB')\n topk_list.append(np.array(patch_PIL))\n\n # save_dict = {'patches':np.asarray(topk_list),'coords':target_coords}\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n saved_path = os.path.join(save_path, self.name + '.h5')\n if not os.path.exists(saved_path):\n f=h5py.File(saved_path,'w')\n f.create_dataset('patches',data=np.asarray(topk_list))\n f.create_dataset('coords',data=target_coords)\n f.close()\n\n return\n\n def _getPatchGenerator(self, cont, cont_idx, patch_level, save_path, patch_size=256, step_size=256, custom_downsample=1,\n white_black=True, white_thresh=15, black_thresh=50, contour_fn='four_pt', use_padding=True):\n start_x, start_y, w, h = cv2.boundingRect(cont) if cont is not None else (0, 0, self.level_dim[patch_level][0], self.level_dim[patch_level][1])\n print(\"Bounding Box:\", start_x, start_y, w, h)\n print(\"Contour Area:\", cv2.contourArea(cont))\n \n if custom_downsample > 1:\n assert custom_downsample == 2 \n target_patch_size = patch_size\n patch_size = target_patch_size * 2\n step_size = step_size * 2\n print(\"Custom Downsample: {}, Patching at {} x {}, But Final Patch Size is {} x {}\".format(custom_downsample, patch_size, patch_size, \n target_patch_size, target_patch_size))\n\n patch_downsample = (int(self.level_downsamples[patch_level][0]), int(self.level_downsamples[patch_level][1]))\n ref_patch_size = (patch_size*patch_downsample[0], patch_size*patch_downsample[1])\n \n step_size_x = step_size * patch_downsample[0]\n step_size_y = step_size * patch_downsample[1]\n \n if isinstance(contour_fn, str):\n if contour_fn == 'four_pt':\n cont_check_fn = isInContourV3_Easy(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'four_pt_hard':\n cont_check_fn = isInContourV3_Hard(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'center':\n cont_check_fn = isInContourV2(contour=cont, patch_size=ref_patch_size[0])\n elif contour_fn == 'basic':\n cont_check_fn = isInContourV1(contour=cont)\n else:\n raise NotImplementedError\n else:\n assert isinstance(contour_fn, Contour_Checking_fn)\n cont_check_fn = contour_fn\n\n img_w, img_h = self.level_dim[0]\n if use_padding:\n stop_y = start_y+h\n stop_x = start_x+w\n else:\n stop_y = min(start_y+h, img_h-ref_patch_size[1])\n stop_x = min(start_x+w, img_w-ref_patch_size[0])\n\n count = 0\n for y in range(start_y, stop_y, step_size_y):\n for x in range(start_x, stop_x, step_size_x):\n\n if not self.isInContours(cont_check_fn, (x,y), self.holes_tissue[cont_idx], ref_patch_size[0]): #point not inside contour and its associated holes\n continue \n \n count+=1\n patch_PIL = self.wsi.read_region((x,y), patch_level, (patch_size, patch_size)).convert('RGB')\n if custom_downsample > 1:\n patch_PIL = patch_PIL.resize((target_patch_size, target_patch_size))\n \n if white_black:\n if isBlackPatch(np.array(patch_PIL), rgbThresh=black_thresh) or isWhitePatch(np.array(patch_PIL), satThresh=white_thresh): \n continue\n\n patch_info = {'x':x // (patch_downsample[0] * custom_downsample), 'y':y // (patch_downsample[1] * custom_downsample), 
'cont_idx':cont_idx, 'patch_level':patch_level, \n 'downsample': self.level_downsamples[patch_level], 'downsampled_level_dim': tuple(np.array(self.level_dim[patch_level])//custom_downsample), 'level_dim': self.level_dim[patch_level],\n 'patch_PIL':patch_PIL, 'name':self.name, 'save_path':save_path}\n\n yield patch_info\n\n \n print(\"patches extracted: {}\".format(count))\n\n @staticmethod\n def isInHoles(holes, pt, patch_size):\n for hole in holes:\n print((pt[0]+patch_size/2, pt[1]+patch_size/2))\n # pdb.set_trace()\n if cv2.pointPolygonTest(hole, (pt[0]+patch_size/2, pt[1]+patch_size/2), False) > 0:\n return 1\n \n return 0\n\n @staticmethod\n def isInContours(cont_check_fn, pt, holes=None, patch_size=256):\n if cont_check_fn(pt):\n if holes is not None:\n return not WholeSlideImage.isInHoles(holes, pt, patch_size)\n else:\n return 1\n return 0\n \n @staticmethod\n def scaleContourDim(contours, scale):\n return [np.array(cont * scale, dtype='int32') for cont in contours]\n\n @staticmethod\n def scaleHolesDim(contours, scale):\n return [[np.array(hole * scale, dtype = 'int32') for hole in holes] for holes in contours]\n\n def _assertLevelDownsamples(self):\n level_downsamples = []\n dim_0 = self.wsi.level_dimensions[0]\n \n for downsample, dim in zip(self.wsi.level_downsamples, self.wsi.level_dimensions):\n try:\n estimated_downsample = (dim_0[0]/float(dim[0]), dim_0[1]/float(dim[1]))\n except:\n continue\n level_downsamples.append(estimated_downsample) if estimated_downsample != (downsample, downsample) else level_downsamples.append((downsample, downsample))\n \n return level_downsamples\n\n def process_contours(self, save_path, patch_level=0, patch_size=256, step_size=256, **kwargs):\n save_path_hdf5 = os.path.join(save_path, str(self.name) + '.h5')\n print(\"Creating patches for: \", self.name, \"...\",)\n elapsed = time.time()\n n_contours = len(self.contours_tissue)\n print(\"Total number of contours to process: \", n_contours)\n fp_chunk_size = math.ceil(n_contours * 0.05)\n init = True\n for idx, cont in enumerate(self.contours_tissue):\n if (idx + 1) % fp_chunk_size == fp_chunk_size:\n print('Processing contour {}/{}'.format(idx, n_contours))\n # pdb.set_trace()\n asset_dict, attr_dict = self.process_contour(cont, self.holes_tissue[idx], patch_level, save_path, patch_size, step_size, **kwargs)\n if len(asset_dict) > 0:\n if init:\n save_hdf5(save_path_hdf5, asset_dict, attr_dict, mode='w')\n init = False\n else:\n save_hdf5(save_path_hdf5, asset_dict, mode='a')\n\n return self.hdf5_file\n\n\n def process_contour(self, cont, contour_holes, patch_level, save_path, patch_size = 256, step_size = 256,\n contour_fn='four_pt', use_padding=True, top_left=None, bot_right=None):\n # pdb.set_trace()\n start_x, start_y, w, h = cv2.boundingRect(cont) if cont is not None else (0, 0, self.level_dim[patch_level][0], self.level_dim[patch_level][1])\n\n patch_downsample = (int(self.level_downsamples[patch_level][0]), int(self.level_downsamples[patch_level][1]))\n ref_patch_size = (patch_size*patch_downsample[0], patch_size*patch_downsample[1])\n \n img_w, img_h = self.level_dim[0]\n if use_padding:\n stop_y = start_y+h\n stop_x = start_x+w\n else:\n stop_y = min(start_y+h, img_h-ref_patch_size[1]+1)\n stop_x = min(start_x+w, img_w-ref_patch_size[0]+1)\n \n print(\"Bounding Box:\", start_x, start_y, w, h)\n print(\"Contour Area:\", cv2.contourArea(cont))\n\n if bot_right is not None:\n stop_y = min(bot_right[1], stop_y)\n stop_x = min(bot_right[0], stop_x)\n if top_left is not None:\n start_y = 
max(top_left[1], start_y)\n start_x = max(top_left[0], start_x)\n\n if bot_right is not None or top_left is not None:\n w, h = stop_x - start_x, stop_y - start_y\n if w <= 0 or h <= 0:\n print(\"Contour is not in specified ROI, skip\")\n return {}, {}\n else:\n print(\"Adjusted Bounding Box:\", start_x, start_y, w, h)\n \n if isinstance(contour_fn, str):\n if contour_fn == 'four_pt':\n cont_check_fn = isInContourV3_Easy(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'four_pt_hard':\n cont_check_fn = isInContourV3_Hard(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'center':\n cont_check_fn = isInContourV2(contour=cont, patch_size=ref_patch_size[0])\n elif contour_fn == 'basic':\n cont_check_fn = isInContourV1(contour=cont)\n else:\n raise NotImplementedError\n else:\n assert isinstance(contour_fn, Contour_Checking_fn)\n cont_check_fn = contour_fn\n\n \n step_size_x = step_size * patch_downsample[0]\n step_size_y = step_size * patch_downsample[1]\n\n x_range = np.arange(start_x, stop_x, step=step_size_x)\n y_range = np.arange(start_y, stop_y, step=step_size_y)\n x_coords, y_coords = np.meshgrid(x_range, y_range, indexing='ij')\n coord_candidates = np.array([x_coords.flatten(), y_coords.flatten()]).transpose()\n\n num_workers = mp.cpu_count()\n if num_workers > 4:\n num_workers = 4\n pool = mp.Pool(num_workers)\n # pdb.set_trace()\n # iterable = [(coord, contour_holes, ref_patch_size[0], cont_check_fn) for coord in coord_candidates]\n # iter_patch_label = [(coord, self.contours_tumor, ref_patch_size[0], cont_check_fn) for coord in coord_candidates]\n iterable = [[coord, contour_holes, ref_patch_size[0], cont_check_fn] for coord in coord_candidates]\n iter_patch_label = [[coord, self.contours_tumor, ref_patch_size[0], cont_check_fn] for coord in coord_candidates]\n\n results = pool.starmap(WholeSlideImage.process_coord_candidate, iterable)\n labels = pool.starmap(WholeSlideImage.process_coord_candidate, iter_patch_label)\n pool.close()\n\n final_results = []\n final_labels = []\n for res_index in range(len(results)):\n if results[res_index] is not None:\n final_results.append(results[res_index])\n if labels[res_index] is None:\n final_labels.append(1)\n else:\n final_labels.append(0)\n # pdb.set_trace()\n\n # results = np.array([result for result in results if result is not None])\n results = np.asarray(final_results)\n labels = np.asarray(final_labels)\n # print('Extracted {} coordinates'.format(len(results)))\n\n if len(results)>1:\n asset_dict = {'coords' : results,\n 'labels': labels}\n # pdb.set_trace()\n print('patch_shape',results.shape)\n attr = {'patch_size' : patch_size, # To be considered...\n 'patch_level' : patch_level,\n 'downsample': self.level_downsamples[patch_level],\n 'downsampled_level_dim' : tuple(np.array(self.level_dim[patch_level])),\n 'level_dim': self.level_dim[patch_level],\n 'name': self.name,\n 'save_path': save_path}\n\n attr_dict = { 'coords' : attr}\n return asset_dict, attr_dict\n\n else:\n return {}, {}\n\n @staticmethod\n def process_coord_candidate(coord, contour_holes, ref_patch_size, cont_check_fn):\n if WholeSlideImage.isInContours(cont_check_fn, coord, contour_holes, ref_patch_size):\n return coord\n else:\n return None\n\n def visHeatmap(self, scores, coords, vis_level=-1, \n top_left=None, bot_right=None,\n patch_size=(256, 256), \n blank_canvas=False, canvas_color=(220, 20, 50), alpha=0.4, \n blur=False, overlap=0.0, \n segment=True, use_holes=True,\n 
convert_to_percentiles=False, \n binarize=False, thresh=0.5,\n max_size=None,\n custom_downsample = 1,\n cmap='coolwarm'):\n\n \"\"\"\n Args:\n scores (numpy array of float): Attention scores \n coords (numpy array of int, n_patches x 2): Corresponding coordinates (relative to lvl 0)\n vis_level (int): WSI pyramid level to visualize\n patch_size (tuple of int): Patch dimensions (relative to lvl 0)\n blank_canvas (bool): Whether to use a blank canvas to draw the heatmap (vs. using the original slide)\n canvas_color (tuple of uint8): Canvas color\n alpha (float [0, 1]): blending coefficient for overlaying heatmap onto original slide\n blur (bool): apply gaussian blurring\n overlap (float [0 1]): percentage of overlap between neighboring patches (only affect radius of blurring)\n segment (bool): whether to use tissue segmentation contour (must have already called self.segmentTissue such that \n self.contours_tissue and self.holes_tissue are not None\n use_holes (bool): whether to also clip out detected tissue cavities (only in effect when segment == True)\n convert_to_percentiles (bool): whether to convert attention scores to percentiles\n binarize (bool): only display patches > threshold\n threshold (float): binarization threshold\n max_size (int): Maximum canvas size (clip if goes over)\n custom_downsample (int): additionally downscale the heatmap by specified factor\n cmap (str): name of matplotlib colormap to use\n \"\"\"\n\n if vis_level < 0:\n vis_level = self.wsi.get_best_level_for_downsample(32)\n # pdb.set_trace()\n downsample = self.level_downsamples[vis_level]\n scale = [1/downsample[0], 1/downsample[1]] # Scaling from 0 to desired level\n if len(scores.shape) == 2:\n scores = scores.flatten()\n\n if binarize:\n if thresh < 0:\n threshold = 1.0/len(scores)\n \n else:\n threshold = thresh\n \n else:\n threshold = 0.0\n\n ##### calculate size of heatmap and filter coordinates/scores outside specified bbox region #####\n if top_left is not None and bot_right is not None:\n scores, coords = screen_coords(scores, coords, top_left, bot_right)\n coords = coords - top_left\n top_left = tuple(top_left)\n bot_right = tuple(bot_right)\n w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int))\n region_size = (w, h)\n\n else:\n region_size = self.level_dim[vis_level]\n top_left = (0,0)\n bot_right = self.level_dim[0]\n w, h = region_size\n\n patch_size = np.ceil(np.array(patch_size) * np.array(scale)).astype(int)\n coords = np.ceil(coords * np.array(scale)).astype(int)\n \n print('\\ncreating heatmap for: ')\n print('top_left: ', top_left, 'bot_right: ', bot_right)\n print('w: {}, h: {}'.format(w, h))\n print('scaled patch size: ', patch_size)\n\n ###### normalize filtered scores ######\n if convert_to_percentiles:\n scores = to_percentiles(scores) \n\n scores /= 100\n \n ######## calculate the heatmap of raw attention scores (before colormap) \n # by accumulating scores over overlapped regions ######\n \n # heatmap overlay: tracks attention score over each pixel of heatmap\n # overlay counter: tracks how many times attention score is accumulated over each pixel of heatmap\n overlay = np.full(np.flip(region_size), 0).astype(float)\n counter = np.full(np.flip(region_size), 0).astype(np.uint16) \n count = 0\n for idx in range(len(coords)):\n score = scores[idx]\n coord = coords[idx]\n if score > threshold:\n if binarize:\n score=1.0\n count+=1\n else:\n score=0.0\n # accumulate attention\n overlay[coord[1]:coord[1]+patch_size[1], 
coord[0]:coord[0]+patch_size[0]] += score\n # accumulate counter\n counter[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] += 1\n\n if binarize:\n print('\\nbinarized tiles based on cutoff of {}'.format(threshold))\n print('identified {}/{} patches as positive'.format(count, len(coords)))\n \n # fetch attended region and average accumulated attention\n zero_mask = counter == 0\n\n if binarize:\n overlay[~zero_mask] = np.around(overlay[~zero_mask] / counter[~zero_mask])\n else:\n overlay[~zero_mask] = overlay[~zero_mask] / counter[~zero_mask]\n del counter \n if blur:\n overlay = cv2.GaussianBlur(overlay,tuple((patch_size * (1-overlap)).astype(int) * 2 +1),0) \n\n if segment:\n tissue_mask = self.get_seg_mask(region_size, scale, use_holes=use_holes, offset=tuple(top_left))\n # return Image.fromarray(tissue_mask) # tissue mask\n # pdb.set_trace()\n\n if not blank_canvas:\n # downsample original image and use as canvas\n img = np.array(self.wsi.read_region(top_left, vis_level, region_size).convert(\"RGB\"))\n else:\n # use blank canvas\n img = np.array(Image.new(size=region_size, mode=\"RGB\", color=(255,255,255))) \n\n #return Image.fromarray(img) #raw image\n\n print('\\ncomputing heatmap image')\n print('total of {} patches'.format(len(coords)))\n twenty_percent_chunk = max(1, int(len(coords) * 0.2))\n\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n \n for idx in range(len(coords)):\n if (idx + 1) % twenty_percent_chunk == 0:\n print('progress: {}/{}'.format(idx, len(coords)))\n \n score = scores[idx]\n coord = coords[idx]\n if score >= threshold:\n\n # attention block\n raw_block = overlay[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]]\n \n # image block (either blank canvas or orig image)\n img_block = img[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]].copy()\n\n # color block (cmap applied to attention block)\n color_block = (cmap(raw_block) * 255)[:,:,:3].astype(np.uint8)\n\n if segment:\n # tissue mask block\n mask_block = tissue_mask[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] \n # copy over only tissue masked portion of color block\n img_block[mask_block] = color_block[mask_block]\n else:\n # copy over entire color block\n img_block = color_block\n\n # rewrite image block\n img[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] = img_block.copy()\n \n #return Image.fromarray(img) #overlay\n print('Done')\n del overlay\n\n if blur:\n img = cv2.GaussianBlur(img,tuple((patch_size * (1-overlap)).astype(int) * 2 +1),0) \n\n if alpha < 1.0:\n img = self.block_blending(img, vis_level, top_left, bot_right, alpha=alpha, blank_canvas=blank_canvas, block_size=1024)\n\n if self.contours_tumor is not None:\n cv2.drawContours(img, self.scaleContourDim(self.contours_tumor, scale),\n -1, color=(0,71,171), thickness=4, lineType=cv2.LINE_8, offset=tuple(-(np.array(top_left) * scale).astype(int)))\n\n img = Image.fromarray(img)\n w, h = img.size\n\n if custom_downsample > 1:\n img = img.resize((int(w/custom_downsample), int(h/custom_downsample)))\n\n if max_size is not None and (w > max_size or h > max_size):\n resizeFactor = max_size/w if w > h else max_size/h\n img = img.resize((int(w*resizeFactor), int(h*resizeFactor)))\n \n return img\n\n \n def block_blending(self, img, vis_level, top_left, bot_right, alpha=0.5, blank_canvas=False, block_size=1024):\n print('\\ncomputing blend')\n downsample = self.level_downsamples[vis_level]\n w = img.shape[1]\n h = img.shape[0]\n block_size_x = 
min(block_size, w)\n block_size_y = min(block_size, h)\n print('using block size: {} x {}'.format(block_size_x, block_size_y))\n\n shift = top_left # amount shifted w.r.t. (0,0)\n for x_start in range(top_left[0], bot_right[0], block_size_x * int(downsample[0])):\n for y_start in range(top_left[1], bot_right[1], block_size_y * int(downsample[1])):\n #print(x_start, y_start)\n\n # 1. convert wsi coordinates to image coordinates via shift and scale\n x_start_img = int((x_start - shift[0]) / int(downsample[0]))\n y_start_img = int((y_start - shift[1]) / int(downsample[1]))\n \n # 2. compute end points of blend tile, careful not to go over the edge of the image\n y_end_img = min(h, y_start_img+block_size_y)\n x_end_img = min(w, x_start_img+block_size_x)\n\n if y_end_img == y_start_img or x_end_img == x_start_img:\n continue\n #print('start_coord: {} end_coord: {}'.format((x_start_img, y_start_img), (x_end_img, y_end_img)))\n \n # 3. fetch blend block and size\n blend_block = img[y_start_img:y_end_img, x_start_img:x_end_img] \n blend_block_size = (x_end_img-x_start_img, y_end_img-y_start_img)\n \n if not blank_canvas:\n # 4. read actual wsi block as canvas block\n pt = (x_start, y_start)\n canvas = np.array(self.wsi.read_region(pt, vis_level, blend_block_size).convert(\"RGB\")) \n else:\n # 4. OR create blank canvas block\n canvas = np.array(Image.new(size=blend_block_size, mode=\"RGB\", color=(255,255,255)))\n\n # 5. blend color block and canvas block\n img[y_start_img:y_end_img, x_start_img:x_end_img] = cv2.addWeighted(blend_block, alpha, canvas, 1 - alpha, 0, canvas)\n return img\n\n def get_seg_mask(self, region_size, scale, use_holes=False, offset=(0,0)):\n print('\\ncomputing foreground tissue mask')\n tissue_mask = np.full(np.flip(region_size), 0).astype(np.uint8)\n contours_tissue = self.scaleContourDim(self.contours_tissue, scale)\n offset = tuple((np.array(offset) * np.array(scale) * -1).astype(np.int32))\n\n contours_holes = self.scaleHolesDim(self.holes_tissue, scale)\n contours_tissue, contours_holes = zip(*sorted(zip(contours_tissue, contours_holes), key=lambda x: cv2.contourArea(x[0]), reverse=True))\n for idx in range(len(contours_tissue)):\n cv2.drawContours(image=tissue_mask, contours=contours_tissue, contourIdx=idx, color=(1), offset=offset, thickness=-1)\n\n if use_holes:\n cv2.drawContours(image=tissue_mask, contours=contours_holes[idx], contourIdx=-1, color=(0), offset=offset, thickness=-1)\n # contours_holes = self._scaleContourDim(self.holes_tissue, scale, holes=True, area_thresh=area_thresh)\n \n tissue_mask = tissue_mask.astype(bool)\n print('detected {}/{} of region as tissue'.format(tissue_mask.sum(), tissue_mask.size))\n return tissue_mask" }, { "identifier": "StitchCoords", "path": "wsi_core/wsi_utils.py", "snippet": "def StitchCoords(hdf5_file_path, wsi_object, downscale=16, draw_grid=False, bg_color=(0,0,0), alpha=-1):\n wsi = wsi_object.getOpenSlide()\n vis_level = wsi.get_best_level_for_downsample(downscale)\n file = h5py.File(hdf5_file_path, 'r')\n dset = file['coords']\n coords = dset[:]\n w, h = wsi.level_dimensions[0]\n\n print('start stitching {}'.format(dset.attrs['name']))\n print('original size: {} x {}'.format(w, h))\n\n w, h = wsi.level_dimensions[vis_level]\n\n print('downscaled size for stiching: {} x {}'.format(w, h))\n print('number of patches: {}'.format(len(coords)))\n \n patch_size = dset.attrs['patch_size']\n patch_level = dset.attrs['patch_level']\n print('patch size: {}x{} patch level: {}'.format(patch_size, patch_size, patch_level))\n 
patch_size = tuple((np.array((patch_size, patch_size)) * wsi.level_downsamples[patch_level]).astype(np.int32))\n print('ref patch size: {}x{}'.format(patch_size, patch_size))\n\n if w*h > Image.MAX_IMAGE_PIXELS: \n raise Image.DecompressionBombError(\"Visualization Downscale %d is too large\" % downscale)\n \n if alpha < 0 or alpha == -1:\n heatmap = Image.new(size=(w,h), mode=\"RGB\", color=bg_color)\n else:\n heatmap = Image.new(size=(w,h), mode=\"RGBA\", color=bg_color + (int(255 * alpha),))\n \n heatmap = np.array(heatmap)\n heatmap = DrawMapFromCoords(heatmap, wsi_object, coords, patch_size, vis_level, indices=None, draw_grid=draw_grid)\n \n file.close()\n return heatmap" }, { "identifier": "initialize_df", "path": "wsi_core/batch_process_utils.py", "snippet": "def initialize_df(slides, seg_params, filter_params, vis_params, patch_params, \n\tuse_heatmap_args=False, save_patches=False):\n\n\ttotal = len(slides)\n\tif isinstance(slides, pd.DataFrame):\n\t\tslide_ids = slides.slide_id.values\n\telse:\n\t\tslide_ids = slides\n\tdefault_df_dict = {'slide_id': slide_ids, 'process': np.full((total), 1, dtype=np.uint8)}\n\n\t# initiate empty labels in case not provided\n\tif use_heatmap_args:\n\t\tdefault_df_dict.update({'label': np.full((total), -1)})\n\t\n\tdefault_df_dict.update({\n\t\t'status': np.full((total), 'tbp'),\n\t\t# seg params\n\t\t'seg_level': np.full((total), int(seg_params['seg_level']), dtype=np.int8),\n\t\t'sthresh': np.full((total), int(seg_params['sthresh']), dtype=np.uint8),\n\t\t'mthresh': np.full((total), int(seg_params['mthresh']), dtype=np.uint8),\n\t\t'close': np.full((total), int(seg_params['close']), dtype=np.uint32),\n\t\t'use_otsu': np.full((total), bool(seg_params['use_otsu']), dtype=bool),\n\t\t'keep_ids': np.full((total), seg_params['keep_ids']),\n\t\t'exclude_ids': np.full((total), seg_params['exclude_ids']),\n\t\t\n\t\t# filter params\n\t\t'a_t': np.full((total), int(filter_params['a_t']), dtype=np.float32),\n\t\t'a_h': np.full((total), int(filter_params['a_h']), dtype=np.float32),\n\t\t'max_n_holes': np.full((total), int(filter_params['max_n_holes']), dtype=np.uint32),\n\n\t\t# vis params\n\t\t'vis_level': np.full((total), int(vis_params['vis_level']), dtype=np.int8),\n\t\t'line_thickness': np.full((total), int(vis_params['line_thickness']), dtype=np.uint32),\n\n\t\t# patching params\n\t\t'use_padding': np.full((total), bool(patch_params['use_padding']), dtype=bool),\n\t\t'contour_fn': np.full((total), patch_params['contour_fn'])\n\t\t})\n\n\tif save_patches:\n\t\tdefault_df_dict.update({\n\t\t\t'white_thresh': np.full((total), int(patch_params['white_thresh']), dtype=np.uint8),\n\t\t\t'black_thresh': np.full((total), int(patch_params['black_thresh']), dtype=np.uint8)})\n\n\tif use_heatmap_args:\n\t\t# initiate empty x,y coordinates in case not provided\n\t\tdefault_df_dict.update({'x1': np.empty((total)).fill(np.NaN), \n\t\t\t'x2': np.empty((total)).fill(np.NaN), \n\t\t\t'y1': np.empty((total)).fill(np.NaN), \n\t\t\t'y2': np.empty((total)).fill(np.NaN)})\n\n\n\tif isinstance(slides, pd.DataFrame):\n\t\ttemp_copy = pd.DataFrame(default_df_dict) # temporary dataframe w/ default params\n\t\t# find key in provided df\n\t\t# if exist, fill empty fields w/ default values, else, insert the default values as a new column\n\t\tfor key in default_df_dict.keys(): \n\t\t\tif key in slides.columns:\n\t\t\t\tmask = slides[key].isna()\n\t\t\t\tslides.loc[mask, key] = temp_copy.loc[mask, key]\n\t\t\telse:\n\t\t\t\tslides.insert(len(slides.columns), key, 
default_df_dict[key])\n\telse:\n\t\tslides = pd.DataFrame(default_df_dict)\n\t\n\treturn slides" } ]
from wsi_core.WholeSlideImage import WholeSlideImage
from wsi_core.wsi_utils import StitchCoords
from wsi_core.batch_process_utils import initialize_df
from glob import glob
import os
import numpy as np
import time
import argparse
import pdb
import pandas as pd
12,265
# internal imports # other imports def stitching(file_path, wsi_object, downscale=64): start = time.time() heatmap = StitchCoords(file_path, wsi_object, downscale=downscale, bg_color=(0, 0, 0), alpha=-1, draw_grid=False) total_time = time.time() - start return heatmap, total_time def segment(WSI_object, seg_params, filter_params): ### Start Seg Timer start_time = time.time() # Segment WSI_object.segmentTissue(**seg_params, filter_params=filter_params) ### Stop Seg Timers seg_time_elapsed = time.time() - start_time return WSI_object, seg_time_elapsed def patching(WSI_object, **kwargs): ### Start Patch Timer start_time = time.time() # Patch file_path = WSI_object.process_contours(**kwargs) ### Stop Patch Timer patch_time_elapsed = time.time() - start_time return file_path, patch_time_elapsed def walk_dir(data_dir, file_types=['.kfb', '.tif', '.svs', '.ndpi', '.mrxs', '.hdx', '.sdpc', '.mdsx', '.tiff', '.tmap']): path_list = [] for dirpath, dirnames, files in os.walk(data_dir): for f in files: for this_type in file_types: if f.lower().endswith(this_type): path_list.append(os.path.join(dirpath, f)) break return path_list def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_dir, patch_size=256, step_size=256, seg_params={'seg_level': -1, 'sthresh': 8, 'mthresh': 7, 'close': 4, 'use_otsu': False, 'keep_ids': 'none', 'exclude_ids': 'none'}, filter_params={'a_t': 100, 'a_h': 16, 'max_n_holes': 8}, vis_params={'vis_level': -1, 'line_thickness': 500}, patch_params={'use_padding': True, 'contour_fn': 'four_pt'}, patch_level=1, use_default_params=False, seg=False, save_mask=True, stitch=False, patch=False, auto_skip=True, process_list=None): slides = glob(source + '/*/*/*/*.svs') # slides = sorted(os.listdir(source), reverse=True) # slides = # pdb.set_trace() # slides = slides[-10:] slides = [slide for slide in slides if os.path.isfile(os.path.join(source, slide))] if process_list is None: df = initialize_df(slides, seg_params, filter_params, vis_params, patch_params) else: df = pd.read_csv(process_list) df = initialize_df(df, seg_params, filter_params, vis_params, patch_params) mask = df['process'] == 1 process_stack = df[mask] total = len(process_stack) legacy_support = 'a' in df.keys() if legacy_support: print('detected legacy segmentation csv file, legacy support enabled') df = df.assign(**{'a_t': np.full((len(df)), int(filter_params['a_t']), dtype=np.uint32), 'a_h': np.full((len(df)), int(filter_params['a_h']), dtype=np.uint32), 'max_n_holes': np.full((len(df)), int(filter_params['max_n_holes']), dtype=np.uint32), 'line_thickness': np.full((len(df)), int(vis_params['line_thickness']), dtype=np.uint32), 'contour_fn': np.full((len(df)), patch_params['contour_fn'])}) seg_times = 0. patch_times = 0. stitch_times = 0. for i in range(total): df.to_csv(os.path.join(save_dir, 'process_list_autogen.csv'), index=False) idx = process_stack.index[i] slide = process_stack.loc[idx, 'slide_id'] print("\n\nprogress: {:.2f}, {}/{}".format(i / total, i, total)) print('processing {}'.format(slide)) df.loc[idx, 'process'] = 0 slide_id, _ = os.path.splitext(slide.split('/')[-1]) if auto_skip and os.path.isfile(os.path.join(patch_save_dir, slide_id + '.h5')): print('{} already exist in destination location, skipped'.format(slide_id)) df.loc[idx, 'status'] = 'already_exist' continue # Inialize WSI full_path = slide try:
# internal imports # other imports def stitching(file_path, wsi_object, downscale=64): start = time.time() heatmap = StitchCoords(file_path, wsi_object, downscale=downscale, bg_color=(0, 0, 0), alpha=-1, draw_grid=False) total_time = time.time() - start return heatmap, total_time def segment(WSI_object, seg_params, filter_params): ### Start Seg Timer start_time = time.time() # Segment WSI_object.segmentTissue(**seg_params, filter_params=filter_params) ### Stop Seg Timers seg_time_elapsed = time.time() - start_time return WSI_object, seg_time_elapsed def patching(WSI_object, **kwargs): ### Start Patch Timer start_time = time.time() # Patch file_path = WSI_object.process_contours(**kwargs) ### Stop Patch Timer patch_time_elapsed = time.time() - start_time return file_path, patch_time_elapsed def walk_dir(data_dir, file_types=['.kfb', '.tif', '.svs', '.ndpi', '.mrxs', '.hdx', '.sdpc', '.mdsx', '.tiff', '.tmap']): path_list = [] for dirpath, dirnames, files in os.walk(data_dir): for f in files: for this_type in file_types: if f.lower().endswith(this_type): path_list.append(os.path.join(dirpath, f)) break return path_list def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_dir, patch_size=256, step_size=256, seg_params={'seg_level': -1, 'sthresh': 8, 'mthresh': 7, 'close': 4, 'use_otsu': False, 'keep_ids': 'none', 'exclude_ids': 'none'}, filter_params={'a_t': 100, 'a_h': 16, 'max_n_holes': 8}, vis_params={'vis_level': -1, 'line_thickness': 500}, patch_params={'use_padding': True, 'contour_fn': 'four_pt'}, patch_level=1, use_default_params=False, seg=False, save_mask=True, stitch=False, patch=False, auto_skip=True, process_list=None): slides = glob(source + '/*/*/*/*.svs') # slides = sorted(os.listdir(source), reverse=True) # slides = # pdb.set_trace() # slides = slides[-10:] slides = [slide for slide in slides if os.path.isfile(os.path.join(source, slide))] if process_list is None: df = initialize_df(slides, seg_params, filter_params, vis_params, patch_params) else: df = pd.read_csv(process_list) df = initialize_df(df, seg_params, filter_params, vis_params, patch_params) mask = df['process'] == 1 process_stack = df[mask] total = len(process_stack) legacy_support = 'a' in df.keys() if legacy_support: print('detected legacy segmentation csv file, legacy support enabled') df = df.assign(**{'a_t': np.full((len(df)), int(filter_params['a_t']), dtype=np.uint32), 'a_h': np.full((len(df)), int(filter_params['a_h']), dtype=np.uint32), 'max_n_holes': np.full((len(df)), int(filter_params['max_n_holes']), dtype=np.uint32), 'line_thickness': np.full((len(df)), int(vis_params['line_thickness']), dtype=np.uint32), 'contour_fn': np.full((len(df)), patch_params['contour_fn'])}) seg_times = 0. patch_times = 0. stitch_times = 0. for i in range(total): df.to_csv(os.path.join(save_dir, 'process_list_autogen.csv'), index=False) idx = process_stack.index[i] slide = process_stack.loc[idx, 'slide_id'] print("\n\nprogress: {:.2f}, {}/{}".format(i / total, i, total)) print('processing {}'.format(slide)) df.loc[idx, 'process'] = 0 slide_id, _ = os.path.splitext(slide.split('/')[-1]) if auto_skip and os.path.isfile(os.path.join(patch_save_dir, slide_id + '.h5')): print('{} already exist in destination location, skipped'.format(slide_id)) df.loc[idx, 'status'] = 'already_exist' continue # Inialize WSI full_path = slide try:
WSI_object = WholeSlideImage(full_path)
0
2023-11-12 14:07:34+00:00
16k
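The record above bundles the CLAM-style whole-slide preprocessing script: tissue segmentation, patch-coordinate extraction, and stitching of a downscaled preview. Below is a minimal sketch (not part of the dataset record) of how the helpers shown in that record chain together for a single slide; the file paths, parameter values, and the keyword names passed to process_contours are illustrative assumptions only.

from wsi_core.WholeSlideImage import WholeSlideImage
from wsi_core.wsi_utils import StitchCoords

slide_path = "/data/slides/example.svs"   # hypothetical input slide
patch_save_dir = "/data/patches"          # hypothetical output directory

seg_params = {'seg_level': -1, 'sthresh': 8, 'mthresh': 7, 'close': 4,
              'use_otsu': False, 'keep_ids': 'none', 'exclude_ids': 'none'}
filter_params = {'a_t': 100, 'a_h': 16, 'max_n_holes': 8}

wsi = WholeSlideImage(slide_path)

# 1. tissue segmentation (same call pattern as segment() in the record)
wsi.segmentTissue(**seg_params, filter_params=filter_params)

# 2. patch-coordinate extraction; the record only forwards **kwargs to
#    process_contours, so these keyword names are assumptions
h5_path = wsi.process_contours(save_path=patch_save_dir, patch_level=1,
                               patch_size=256, step_size=256)

# 3. stitch a downscaled preview image from the saved coordinates
heatmap = StitchCoords(h5_path, wsi, downscale=64,
                       bg_color=(0, 0, 0), alpha=-1, draw_grid=False)

In the record's seg_and_patch driver the same three steps run once per row of the dataframe built by initialize_df, with progress written back to process_list_autogen.csv.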
zhang-tao-whu/DVIS_Plus
dvis_Plus/meta_architecture.py
[ { "identifier": "VideoSetCriterion", "path": "mask2former_video/modeling/criterion.py", "snippet": "class VideoSetCriterion(nn.Module):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,\n num_points, oversample_ratio, importance_sample_ratio, frames=2):\n \"\"\"Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer(\"empty_weight\", empty_weight)\n\n # pointwise mask loss parameters\n self.num_points = num_points\n self.oversample_ratio = oversample_ratio\n self.importance_sample_ratio = importance_sample_ratio\n self.frames = frames\n\n def loss_labels(self, outputs, targets, indices, num_masks):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"].float()\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device\n )\n target_classes[idx] = target_classes_o.to(target_classes)\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)\n losses = {\"loss_ce\": loss_ce}\n return losses\n \n def loss_masks(self, outputs, targets, indices, num_masks):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n src_masks = outputs[\"pred_masks\"]\n src_masks = src_masks[src_idx]\n # Modified to handle video\n target_masks = torch.cat([t['masks'][i] for t, (_, i) in zip(targets, indices)]).to(src_masks)\n\n # No need to upsample predictions as we are using normalized coordinates :)\n # NT x 1 x H x W\n src_masks = src_masks.flatten(0, 1)[:, None]\n target_masks = target_masks.flatten(0, 1)[:, None]\n\n with torch.no_grad():\n # sample point_coords\n point_coords = get_uncertain_point_coords_with_randomness(\n src_masks.to(torch.float32),\n lambda logits: calculate_uncertainty(logits),\n self.num_points,\n self.oversample_ratio,\n self.importance_sample_ratio,\n )\n # get gt labels\n point_labels = point_sample(\n target_masks,\n point_coords.to(target_masks),\n align_corners=False,\n ).squeeze(1)\n\n point_logits = point_sample(\n src_masks,\n point_coords.to(src_masks),\n align_corners=False,\n ).squeeze(1)\n\n losses = {\n 
\"loss_mask\": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),\n \"loss_dice\": dice_loss_jit(point_logits, point_labels, num_masks),\n }\n\n del src_masks\n del target_masks\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_masks):\n loss_map = {\n 'labels': self.loss_labels,\n 'masks': self.loss_masks,\n }\n assert loss in loss_map, f\"do you really want to compute {loss} loss?\"\n return loss_map[loss](outputs, targets, indices, num_masks)\n\n def forward(self, outputs, targets, matcher_outputs=None, ret_match_result=False):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n if matcher_outputs is None:\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n else:\n outputs_without_aux = {k: v for k, v in matcher_outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n # [per image indicates], per image indicates -> (pred inds, gt inds)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_masks = sum(len(t[\"labels\"]) for t in targets)\n num_masks = torch.as_tensor(\n [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_masks)\n num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n if matcher_outputs is None:\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n if ret_match_result:\n return losses, indices\n return losses\n\n def __repr__(self):\n head = \"Criterion \" + self.__class__.__name__\n body = [\n \"matcher: {}\".format(self.matcher.__repr__(_repr_indent=8)),\n \"losses: {}\".format(self.losses),\n \"weight_dict: {}\".format(self.weight_dict),\n \"num_classes: {}\".format(self.num_classes),\n \"eos_coef: {}\".format(self.eos_coef),\n \"num_points: {}\".format(self.num_points),\n \"oversample_ratio: {}\".format(self.oversample_ratio),\n \"importance_sample_ratio: {}\".format(self.importance_sample_ratio),\n ]\n _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": 
"VideoHungarianMatcher", "path": "mask2former_video/modeling/matcher.py", "snippet": "class VideoHungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost\n cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_mask = cost_mask\n self.cost_dice = cost_dice\n\n assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, \"all costs cant be 0\"\n\n self.num_points = num_points\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n indices = []\n\n # Iterate through batch size\n for b in range(bs):\n\n out_prob = outputs[\"pred_logits\"][b].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[b][\"labels\"].to(torch.int64)\n\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n try:\n cost_class = -out_prob[:, tgt_ids]\n except:\n cost_class = 0.0\n print(tgt_ids)\n\n out_mask = outputs[\"pred_masks\"][b] # [num_queries, T, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[b][\"masks\"].to(out_mask) # [num_gts, T, H_pred, W_pred]\n\n # out_mask = out_mask[:, None]\n # tgt_mask = tgt_mask[:, None]\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1).to(tgt_mask),\n align_corners=False,\n ).flatten(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1).to(out_mask),\n align_corners=False,\n ).flatten(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n\n indices.append(linear_sum_assignment(C))\n\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n # [per image indicates], per image indicates -> (pred inds, gt inds)\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n 
\"pred_masks\": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"masks\": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n return self.memory_efficient_forward(outputs, targets)\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_mask: {}\".format(self.cost_mask),\n \"cost_dice: {}\".format(self.cost_dice),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "VideoHungarianMatcher_Consistent", "path": "mask2former_video/modeling/matcher.py", "snippet": "class VideoHungarianMatcher_Consistent(VideoHungarianMatcher):\n \"\"\"\n Only match in the first frame where the object appears in the GT.\n \"\"\"\n def __init__(self, cost_class: float = 1, cost_mask: float = 1,\n cost_dice: float = 1, num_points: int = 0,\n frames: int = 5):\n super().__init__(\n cost_class=cost_class, cost_mask=cost_mask,\n cost_dice=cost_dice, num_points=num_points,\n )\n self.frames = frames\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n indices = []\n\n # Iterate through batch size\n for b in range(bs // self.frames):\n # find the fist frame where the object appears\n id_apper_frame = {}\n for f in range(self.frames):\n overall_bs = b * self.frames + f\n instance_ids = targets[overall_bs][\"ids\"]\n valid = torch.nonzero(instance_ids.squeeze(1) != -1)\n for v in valid:\n v = v.item()\n if v not in id_apper_frame.keys():\n id_apper_frame[v] = f\n\n # obtain the object ID that first appears in each frame\n apper_frame_id = {}\n for id in id_apper_frame.keys():\n f = id_apper_frame[id]\n if f in apper_frame_id:\n apper_frame_id[f].append(id)\n else:\n apper_frame_id[f] = [id]\n need_match_frames = list(apper_frame_id.keys())\n need_match_frames.sort()\n\n # per frame match\n used_query_idx = []\n matched_indices = [[], []]\n for f in need_match_frames:\n overall_bs = b * self.frames + f\n used_tgt = apper_frame_id[f]\n out_prob = outputs[\"pred_logits\"][overall_bs].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[overall_bs][\"labels\"][used_tgt]\n\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n\n out_mask = outputs[\"pred_masks\"][overall_bs] # [num_queries, T, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[overall_bs][\"masks\"][used_tgt].to(out_mask) # [num_gts, T, H_pred, W_pred]\n\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1).to(tgt_mask),\n align_corners=False,\n ).flatten(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1).to(out_mask),\n align_corners=False,\n ).flatten(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n if len(used_query_idx) != 0:\n C[used_query_idx, :] = 1e6\n indice1, indice2 = linear_sum_assignment(C)\n\n used_query_idx += list(indice1)\n\n indice2 = np.array(used_tgt)[indice2]\n matched_indices[0] += list(indice1)\n matched_indices[1] += list(indice2)\n indices += [matched_indices] * self.frames\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]" }, { "identifier": "retry_if_cuda_oom", "path": "mask2former_video/utils/memory.py", "snippet": "def retry_if_cuda_oom(func):\n \"\"\"\n Makes a function retry itself after encountering\n pytorch's CUDA OOM error.\n It will first retry after calling `torch.cuda.empty_cache()`.\n If that still fails, it will then retry by trying to convert inputs to CPUs.\n In this case, it expects the function to dispatch to CPU implementation.\n The return values may become CPU tensors as well and it's user's\n responsibility to convert it back to CUDA tensor if needed.\n Args:\n func: a stateless callable that takes tensor-like objects as arguments\n Returns:\n a callable which retries `func` if OOM is encountered.\n Examples:\n ::\n output = retry_if_cuda_oom(some_torch_function)(input1, input2)\n # output may be on CPU even if inputs are on GPU\n Note:\n 1. When converting inputs to CPU, it will only look at each argument and check\n if it has `.device` and `.to` for conversion. Nested structures of tensors\n are not supported.\n 2. Since the function might be called more than once, it has to be\n stateless.\n \"\"\"\n\n def maybe_to_cpu(x):\n try:\n like_gpu_tensor = x.device.type == \"cuda\" and hasattr(x, \"to\")\n except AttributeError:\n like_gpu_tensor = False\n if like_gpu_tensor:\n return x.to(device=\"cpu\").to(torch.float32)\n else:\n return x\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Clear cache and retry\n torch.cuda.empty_cache()\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Try on CPU. 
This slows down the code significantly, therefore print a notice.\n logger = logging.getLogger(__name__)\n logger.info(\"Attempting to copy inputs to CPU due to CUDA OOM\")\n new_args = (maybe_to_cpu(x) for x in args)\n new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}\n with autocast(enabled=False):\n return func(*new_args, **new_kwargs)\n\n return wrapped" }, { "identifier": "ReferringTracker_noiser", "path": "dvis_Plus/tracker.py", "snippet": "class ReferringTracker_noiser(torch.nn.Module):\n def __init__(\n self,\n hidden_channel=256,\n feedforward_channel=2048,\n num_head=8,\n decoder_layer_num=6,\n mask_dim=256,\n class_num=25,\n noise_mode='hard',\n noise_ratio=0.5,\n ):\n super(ReferringTracker_noiser, self).__init__()\n\n # init transformer layers\n self.num_heads = num_head\n self.num_layers = decoder_layer_num\n self.transformer_self_attention_layers = nn.ModuleList()\n self.transformer_cross_attention_layers = nn.ModuleList()\n self.transformer_ffn_layers = nn.ModuleList()\n\n for _ in range(self.num_layers):\n\n self.transformer_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_cross_attention_layers.append(\n ReferringCrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_ffn_layers.append(\n FFNLayer(\n d_model=hidden_channel,\n dim_feedforward=feedforward_channel,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.use_memory = False\n if self.use_memory:\n self.memory_cross_attn = CrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,)\n self.references_memory = None\n\n self.decoder_norm = nn.LayerNorm(hidden_channel)\n\n # init heads\n self.class_embed = nn.Linear(2 * hidden_channel, class_num + 1)\n self.mask_embed = MLP(hidden_channel, hidden_channel, mask_dim, 3)\n\n # for cl learning\n self.ref_proj = MLP(hidden_channel, hidden_channel, hidden_channel, 3)\n\n for layer in self.ref_proj.layers:\n weight_init.c2_xavier_fill(layer)\n\n # mask features projection\n self.mask_feature_proj = nn.Conv2d(\n mask_dim,\n mask_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n\n # record previous frame information\n self.last_outputs = None\n self.last_frame_embeds = None\n self.last_reference = None\n\n self.noiser = Noiser(noise_ratio=noise_ratio, mode=noise_mode)\n\n def _clear_memory(self):\n del self.last_outputs\n self.last_outputs = None\n self.last_reference = None\n return\n\n def forward(self, frame_embeds, mask_features, resume=False,\n return_indices=False, frame_classes=None,\n frame_embeds_no_norm=None):\n \"\"\"\n :param frame_embeds: the instance queries output by the segmenter\n :param mask_features: the mask features output by the segmenter\n :param resume: whether the first frame is the start of the video\n :param return_indices: whether return the match indices\n :return: output dict, including masks, classes, embeds.\n \"\"\"\n # mask feature projection\n mask_features_shape = mask_features.shape\n mask_features = self.mask_feature_proj(mask_features.flatten(0, 1)).reshape(*mask_features_shape) # (b, t, c, h, w)\n\n frame_embeds = frame_embeds.permute(2, 3, 0, 1) # t, q, b, c\n if frame_embeds_no_norm is not None:\n frame_embeds_no_norm = frame_embeds_no_norm.permute(2, 3, 0, 1) # t, q, b, c\n n_frame, n_q, bs, _ = frame_embeds.size()\n outputs = []\n ret_indices = []\n\n all_frames_references = 
[]\n\n for i in range(n_frame):\n ms_output = []\n single_frame_embeds = frame_embeds[i] # q b c\n if frame_embeds_no_norm is not None:\n single_frame_embeds_no_norm = frame_embeds_no_norm[i]\n else:\n single_frame_embeds_no_norm = single_frame_embeds\n if frame_classes is None:\n single_frame_classes = None\n else:\n single_frame_classes = frame_classes[i]\n\n frame_key = single_frame_embeds_no_norm\n\n # the first frame of a video\n if i == 0 and resume is False:\n self._clear_memory()\n for j in range(self.num_layers):\n if j == 0:\n indices, noised_init = self.noiser(\n single_frame_embeds,\n single_frame_embeds,\n cur_embeds_no_norm=single_frame_embeds_no_norm,\n activate=False,\n cur_classes=single_frame_classes,\n )\n ms_output.append(single_frame_embeds_no_norm[indices])\n self.last_frame_embeds = single_frame_embeds[indices]\n ret_indices.append(indices)\n output = self.transformer_cross_attention_layers[j](\n noised_init, self.ref_proj(frame_key),\n frame_key, single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n else:\n output = self.transformer_cross_attention_layers[j](\n ms_output[-1], self.ref_proj(ms_output[-1]),\n frame_key, single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n self.last_reference = self.ref_proj(frame_key)\n else:\n reference = self.ref_proj(self.last_outputs[-1])\n self.last_reference = reference\n\n for j in range(self.num_layers):\n if j == 0:\n indices, noised_init = self.noiser(\n self.last_frame_embeds,\n single_frame_embeds,\n cur_embeds_no_norm=single_frame_embeds_no_norm,\n activate=self.training,\n cur_classes=single_frame_classes,\n )\n ms_output.append(single_frame_embeds_no_norm[indices])\n self.last_frame_embeds = single_frame_embeds[indices]\n ret_indices.append(indices)\n output = self.transformer_cross_attention_layers[j](\n noised_init, reference, frame_key,\n single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n else:\n output = self.transformer_cross_attention_layers[j](\n ms_output[-1], reference, frame_key,\n single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n\n all_frames_references.append(self.last_reference)\n\n ms_output = torch.stack(ms_output, dim=0) # (1 + layers, q, b, c)\n self.last_outputs = ms_output\n outputs.append(ms_output[1:])\n outputs = torch.stack(outputs, dim=0) # (t, l, q, b, c)\n\n all_frames_references = torch.stack(all_frames_references, dim=0) # (t, q, b, c)\n\n mask_features_ = mask_features\n if not 
self.training:\n outputs = outputs[:, -1:]\n del mask_features\n outputs_class, outputs_masks = self.prediction(outputs, mask_features_, all_frames_references)\n out = {\n 'pred_logits': outputs_class[-1].transpose(1, 2), # (b, t, q, c)\n 'pred_masks': outputs_masks[-1], # (b, q, t, h, w)\n 'aux_outputs': self._set_aux_loss(\n outputs_class, outputs_masks\n ),\n 'pred_embds': outputs[:, -1].permute(2, 3, 0, 1), # (b, c, t, q),\n 'pred_references': all_frames_references.permute(2, 3, 0, 1), # (b, c, t, q),\n }\n if return_indices:\n return out, ret_indices\n else:\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_seg_masks):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{\"pred_logits\": a.transpose(1, 2), \"pred_masks\": b}\n for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])\n ]\n\n def prediction(self, outputs, mask_features, references):\n # outputs (t, l, q, b, c)\n # mask_features (b, t, c, h, w)\n # references (t, q, b, c)\n decoder_output = self.decoder_norm(outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n\n references = references.unsqueeze(1).repeat(1, decoder_output.size(0), 1, 1, 1).permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n decoder_output_cls = torch.cat([references, decoder_output], dim=-1)\n outputs_class = self.class_embed(decoder_output_cls).transpose(2, 3) # (l, b, q, t, cls+1)\n mask_embed = self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\"lbtqc,btchw->lbqthw\", mask_embed, mask_features)\n return outputs_class, outputs_mask" }, { "identifier": "TemporalRefiner", "path": "dvis_Plus/refiner.py", "snippet": "class TemporalRefiner(torch.nn.Module):\n def __init__(\n self,\n hidden_channel=256,\n feedforward_channel=2048,\n num_head=8,\n decoder_layer_num=6,\n mask_dim=256,\n class_num=25,\n windows=5,\n ):\n super(TemporalRefiner, self).__init__()\n\n self.windows = windows\n\n # init transformer layers\n self.num_heads = num_head\n self.num_layers = decoder_layer_num\n self.transformer_obj_self_attention_layers = nn.ModuleList()\n self.transformer_time_self_attention_layers = nn.ModuleList()\n self.transformer_cross_attention_layers = nn.ModuleList()\n self.transformer_ffn_layers = nn.ModuleList()\n\n self.conv_short_aggregate_layers = nn.ModuleList()\n self.conv_norms = nn.ModuleList()\n\n for _ in range(self.num_layers):\n self.transformer_time_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.conv_short_aggregate_layers.append(\n nn.Sequential(\n nn.Conv1d(hidden_channel, hidden_channel,\n kernel_size=5, stride=1,\n padding='same', padding_mode='replicate'),\n nn.ReLU(inplace=True),\n nn.Conv1d(hidden_channel, hidden_channel,\n kernel_size=3, stride=1,\n padding='same', padding_mode='replicate'),\n )\n )\n\n self.conv_norms.append(nn.LayerNorm(hidden_channel))\n\n self.transformer_obj_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_cross_attention_layers.append(\n CrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_ffn_layers.append(\n FFNLayer(\n d_model=hidden_channel,\n dim_feedforward=feedforward_channel,\n dropout=0.0,\n 
normalize_before=False,\n )\n )\n\n self.decoder_norm = nn.LayerNorm(hidden_channel)\n\n # init heads\n self.class_embed = nn.Linear(hidden_channel, class_num + 1)\n self.mask_embed = MLP(hidden_channel, hidden_channel, mask_dim, 3)\n\n self.activation_proj = nn.Linear(hidden_channel, 1)\n\n def forward(self, instance_embeds, frame_embeds, mask_features):\n \"\"\"\n :param instance_embeds: the aligned instance queries output by the tracker, shape is (b, c, t, q)\n :param frame_embeds: the instance queries processed by the tracker.frame_forward function, shape is (b, c, t, q)\n :param mask_features: the mask features output by the segmenter, shape is (b, t, c, h, w)\n :return: output dict, including masks, classes, embeds.\n \"\"\"\n n_batch, n_channel, n_frames, n_instance = instance_embeds.size()\n\n outputs = []\n output = instance_embeds\n frame_embeds = frame_embeds.permute(3, 0, 2, 1).flatten(1, 2)\n\n for i in range(self.num_layers):\n output = output.permute(2, 0, 3, 1) # (t, b, q, c)\n output = output.flatten(1, 2) # (t, bq, c)\n\n # do long temporal attention\n output = self.transformer_time_self_attention_layers[i](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n\n # do short temporal conv\n output = output.permute(1, 2, 0) # (bq, c, t)\n output = self.conv_norms[i](\n (self.conv_short_aggregate_layers[i](output) + output).transpose(1, 2)\n ).transpose(1, 2)\n output = output.reshape(\n n_batch, n_instance, n_channel, n_frames\n ).permute(1, 0, 3, 2).flatten(1, 2) # (q, bt, c)\n\n # do objects self attention\n output = self.transformer_obj_self_attention_layers[i](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n\n # do cross attention\n output = self.transformer_cross_attention_layers[i](\n output, frame_embeds,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n # FFN\n output = self.transformer_ffn_layers[i](\n output\n )\n\n output = output.reshape(n_instance, n_batch, n_frames, n_channel).permute(1, 3, 2, 0) # (b, c, t, q)\n outputs.append(output)\n\n outputs = torch.stack(outputs, dim=0).permute(3, 0, 4, 1, 2) # (l, b, c, t, q) -> (t, l, q, b, c)\n outputs_class, outputs_masks = self.prediction(outputs, mask_features)\n outputs = self.decoder_norm(outputs)\n out = {\n 'pred_logits': outputs_class[-1].transpose(1, 2), # (b, t, q, c)\n 'pred_masks': outputs_masks[-1], # (b, q, t, h, w)\n 'aux_outputs': self._set_aux_loss(\n outputs_class, outputs_masks\n ),\n 'pred_embds': outputs[:, -1].permute(2, 3, 0, 1) # (b, c, t, q)\n }\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_seg_masks):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{\"pred_logits\": a.transpose(1, 2), \"pred_masks\": b}\n for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])\n ]\n\n def windows_prediction(self, outputs, mask_features, windows=5):\n \"\"\"\n for windows prediction, because mask features consumed too much GPU memory\n \"\"\"\n iters = outputs.size(0) // windows\n if outputs.size(0) % windows != 0:\n iters += 1\n outputs_classes = []\n outputs_masks = []\n for i in range(iters):\n start_idx = i * windows\n end_idx = (i + 1) * windows\n clip_outputs = outputs[start_idx:end_idx]\n decoder_output = self.decoder_norm(clip_outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n mask_embed = 
self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\n \"lbtqc,btchw->lbqthw\",\n mask_embed,\n mask_features[:, start_idx:end_idx].to(mask_embed.device)\n )\n outputs_classes.append(decoder_output)\n outputs_masks.append(outputs_mask.cpu().to(torch.float32))\n outputs_classes = torch.cat(outputs_classes, dim=2)\n outputs_classes = self.pred_class(outputs_classes)\n return outputs_classes.cpu().to(torch.float32), torch.cat(outputs_masks, dim=3)\n\n def pred_class(self, decoder_output):\n \"\"\"\n fuse the objects queries of all frames and predict an overall score based on the fused objects queries\n :param decoder_output: instance queries, shape is (l, b, t, q, c)\n \"\"\"\n T = decoder_output.size(2)\n\n # compute the weighted average of the decoder_output\n activation = self.activation_proj(decoder_output).softmax(dim=2) # (l, b, t, q, 1)\n class_output = (decoder_output * activation).sum(dim=2, keepdim=True) # (l, b, 1, q, c)\n\n # to unify the output format, duplicate the fused features T times\n class_output = class_output.repeat(1, 1, T, 1, 1)\n outputs_class = self.class_embed(class_output).transpose(2, 3)\n return outputs_class\n\n def prediction(self, outputs, mask_features):\n \"\"\"\n :param outputs: instance queries, shape is (t, l, q, b, c)\n :param mask_features: mask features, shape is (b, t, c, h, w)\n :return: pred class and pred masks\n \"\"\"\n if self.training:\n decoder_output = self.decoder_norm(outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n outputs_class = self.pred_class(decoder_output)\n mask_embed = self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\"lbtqc,btchw->lbqthw\", mask_embed, mask_features)\n else:\n outputs = outputs[:, -1:]\n outputs_class, outputs_mask = self.windows_prediction(outputs, mask_features, windows=self.windows)\n return outputs_class, outputs_mask" }, { "identifier": "loss_reid", "path": "dvis_Plus/utils.py", "snippet": "def loss_reid(qd_items, outputs):\n # outputs only using when have not contrastive items\n # compute two loss, contrastive loss & similarity loss\n contras_loss = 0\n aux_loss = 0\n num_qd_items = len(qd_items) # n_instances * frames\n\n # if none items, return 0 loss\n if len(qd_items) == 0:\n if 'pred_references' in outputs.keys():\n losses = {'loss_reid': outputs['pred_references'].sum() * 0,\n 'loss_aux_reid': outputs['pred_references'].sum() * 0}\n else:\n losses = {'loss_reid': outputs['pred_embds'].sum() * 0,\n 'loss_aux_reid': outputs['pred_embds'].sum() * 0}\n return losses\n\n for qd_item in qd_items:\n # (n_pos, n_anchor) -> (n_anchor, n_pos)\n pred = qd_item['dot_product'].permute(1, 0)\n label = qd_item['label'].unsqueeze(0)\n # contrastive loss\n pos_inds = (label == 1)\n neg_inds = (label == 0)\n pred_pos = pred * pos_inds.float()\n pred_neg = pred * neg_inds.float()\n # use -inf to mask out unwanted elements.\n pred_pos[neg_inds] = pred_pos[neg_inds] + float('inf')\n pred_neg[pos_inds] = pred_neg[pos_inds] + float('-inf')\n\n _pos_expand = torch.repeat_interleave(pred_pos, pred.shape[1], dim=1)\n _neg_expand = pred_neg.repeat(1, pred.shape[1])\n # [bz,N], N is all pos and negative samples on reference frame, label indicate it's pos or negative\n x = torch.nn.functional.pad(\n (_neg_expand - _pos_expand), (0, 1), \"constant\", 0)\n contras_loss += torch.logsumexp(x, dim=1)\n\n aux_pred = qd_item['cosine_similarity'].permute(1, 0)\n aux_label = qd_item['label'].unsqueeze(0)\n aux_loss += (torch.abs(aux_pred - aux_label) ** 2).mean()\n\n losses = 
{'loss_reid': contras_loss.sum() / num_qd_items,\n 'loss_aux_reid': aux_loss / num_qd_items}\n return losses" }, { "identifier": "Outputs_Memory_PerClasses", "path": "dvis_Plus/utils.py", "snippet": "class Outputs_Memory_PerClasses:\n def __init__(self, max_len=100,):\n self.class_references = {}\n self.max_len = max_len\n\n def push(self, references, targets, referecne_match_result):\n # for tracker\n references = references.detach()\n for i in range(len(targets)):\n classes = targets[i]['labels'] # (N, )\n frame_match_result = referecne_match_result[i]\n frame_reference = references[i]\n for i_ref, i_gt in zip(frame_match_result[0], frame_match_result[1]):\n cls = classes[i_gt].item()\n if cls in self.class_references.keys():\n self.class_references[cls].append(frame_reference[i_ref])\n else:\n self.class_references[cls] = [frame_reference[i_ref]]\n for cls in self.class_references.keys():\n if len(self.class_references[cls]) > self.max_len:\n self.class_references[cls] = self.class_references[cls][-self.max_len:]\n return\n\n def push_refiner(self, references, targets, referecne_match_result):\n # for refiner\n references = references.clone().detach()\n classes = targets['labels'] # (N, )\n for i_ref, i_gt in zip(referecne_match_result[0], referecne_match_result[1]):\n cls = classes[i_gt].item()\n if cls in self.class_references.keys():\n self.class_references[cls].extend(list(torch.unbind(references[:, i_ref], dim=0)))\n else:\n self.class_references[cls] = list(torch.unbind(references[:, i_ref], dim=0))\n\n for cls in self.class_references.keys():\n if len(self.class_references[cls]) > self.max_len:\n random.shuffle(self.class_references[cls])\n self.class_references[cls] = self.class_references[cls][-self.max_len:]\n return\n\n def get_items(self, cls):\n if cls not in self.class_references.keys():\n return []\n else:\n cls_ref = torch.stack(self.class_references[cls], dim=0)\n return cls_ref" } ]
from typing import Tuple
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from mask2former_video.modeling.criterion import VideoSetCriterion
from mask2former_video.modeling.matcher import VideoHungarianMatcher, VideoHungarianMatcher_Consistent
from mask2former_video.utils.memory import retry_if_cuda_oom
from scipy.optimize import linear_sum_assignment
from .tracker import ReferringTracker_noiser
from .refiner import TemporalRefiner
from .utils import loss_reid, Outputs_Memory_PerClasses
import einops
import torch
14,238
out_ids.append(cur_ids[k]) return { "image_size": (output_height, output_width), "pred_masks": panoptic_seg.cpu(), "segments_infos": segments_infos, "pred_ids": out_ids, "task": "vps", } def inference_video_vss( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, pred_id, aux_pred_cls=None, ): mask_cls = F.softmax(pred_cls, dim=-1)[..., :-1] if aux_pred_cls is not None: aux_pred_cls = F.softmax(aux_pred_cls, dim=-1)[..., :-1] mask_cls = torch.maximum(mask_cls, aux_pred_cls.to(mask_cls)) mask_pred = pred_masks # interpolation to original image size cur_masks = F.interpolate( mask_pred, size=first_resize_size, mode="bilinear", align_corners=False ) cur_masks = cur_masks[:, :, :img_size[0], :img_size[1]].sigmoid() cur_masks = F.interpolate( cur_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) semseg = torch.einsum("qc,qthw->cthw", mask_cls, cur_masks) sem_score, sem_mask = semseg.max(0) sem_mask = sem_mask return { "image_size": (output_height, output_width), "pred_masks": sem_mask.cpu(), "task": "vss", } def get_cl_loss_ref(self, outputs, referecne_match_result): references = outputs['pred_references'] # t q c # per frame contrastive_items = [] for i in range(references.size(0)): if i == 0: continue frame_reference = references[i] # (q, c) frame_reference_ = references[i - 1] # (q, c) if i != references.size(0) - 1: frame_reference_next = references[i + 1] else: frame_reference_next = None frame_ref_gt_indices = referecne_match_result[i] gt2ref = {} for i_ref, i_gt in zip(frame_ref_gt_indices[0], frame_ref_gt_indices[1]): gt2ref[i_gt.item()] = i_ref.item() # per instance for i_gt in gt2ref.keys(): i_ref = gt2ref[i_gt] anchor_embeds = frame_reference[[i_ref]] pos_embeds = frame_reference_[[i_ref]] neg_range = list(range(0, i_ref)) + list(range(i_ref + 1, frame_reference.size(0))) neg_embeds = frame_reference_[neg_range] num_positive = pos_embeds.shape[0] # concate pos and neg to get whole constractive samples pos_neg_embedding = torch.cat( [pos_embeds, neg_embeds], dim=0) # generate label, pos is 1, neg is 0 pos_neg_label = pos_neg_embedding.new_zeros((pos_neg_embedding.shape[0],), dtype=torch.int64) # noqa pos_neg_label[:num_positive] = 1. # dot product dot_product = torch.einsum( 'ac,kc->ak', [pos_neg_embedding, anchor_embeds]) aux_normalize_pos_neg_embedding = nn.functional.normalize( pos_neg_embedding, dim=1) aux_normalize_anchor_embedding = nn.functional.normalize( anchor_embeds, dim=1) aux_cosine_similarity = torch.einsum('ac,kc->ak', [aux_normalize_pos_neg_embedding, aux_normalize_anchor_embedding]) contrastive_items.append({ 'dot_product': dot_product, 'cosine_similarity': aux_cosine_similarity, 'label': pos_neg_label}) if frame_reference_next is not None: pos_embeds = frame_reference_next[[i_ref]] neg_range = list(range(0, i_ref)) + list(range(i_ref + 1, frame_reference.size(0))) neg_embeds = frame_reference_next[neg_range] num_positive = pos_embeds.shape[0] # concate pos and neg to get whole constractive samples pos_neg_embedding = torch.cat( [pos_embeds, neg_embeds], dim=0) # generate label, pos is 1, neg is 0 pos_neg_label = pos_neg_embedding.new_zeros((pos_neg_embedding.shape[0],), dtype=torch.int64) # noqa pos_neg_label[:num_positive] = 1. 
# dot product dot_product = torch.einsum( 'ac,kc->ak', [pos_neg_embedding, anchor_embeds]) aux_normalize_pos_neg_embedding = nn.functional.normalize( pos_neg_embedding, dim=1) aux_normalize_anchor_embedding = nn.functional.normalize( anchor_embeds, dim=1) aux_cosine_similarity = torch.einsum('ac,kc->ak', [aux_normalize_pos_neg_embedding, aux_normalize_anchor_embedding]) contrastive_items.append({ 'dot_product': dot_product, 'cosine_similarity': aux_cosine_similarity, 'label': pos_neg_label})
@META_ARCH_REGISTRY.register() class MinVIS(nn.Module): """ Copied from "https://github.com/NVlabs/MinVIS". """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, metadata, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video num_frames, window_inference, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image semantic_on: bool, whether to output semantic segmentation prediction instance_on: bool, whether to output instance segmentation prediction panoptic_on: bool, whether to output panoptic segmentation prediction test_topk_per_image: int, instance segmentation parameter, keep topk instances per image """ super().__init__() self.backbone = backbone self.sem_seg_head = sem_seg_head self.criterion = criterion self.num_queries = num_queries self.overlap_threshold = overlap_threshold self.object_mask_threshold = object_mask_threshold self.metadata = metadata if size_divisibility < 0: # use backbone size_divisibility if not set size_divisibility = self.backbone.size_divisibility self.size_divisibility = size_divisibility self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) self.num_frames = num_frames self.window_inference = window_inference @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion matcher = VideoHungarianMatcher( cost_class=class_weight, cost_mask=mask_weight, cost_dice=dice_weight, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, ) weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight} if deep_supervision: dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS aux_weight_dict = {} for i in range(dec_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) losses = ["labels", 
"masks"] criterion = VideoSetCriterion( sem_seg_head.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO, ) return { "backbone": backbone, "sem_seg_head": sem_seg_head, "criterion": criterion, "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES, "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD, "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD, "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY, "sem_seg_postprocess_before_inference": True, "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, # video "num_frames": cfg.INPUT.SAMPLING_FRAME_NUM, "window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE, } @property def device(self): return self.pixel_mean.device def forward(self, batched_inputs): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper`. Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * "image": Tensor, image in (C, H, W) format. * "instances": per-region ground truth * Other information that's included in the original dicts, such as: "height", "width" (int): the output resolution of the model (may be different from input resolution), used in inference. Returns: list[dict]: each dict has the results for one image. The dict contains the following keys: * "sem_seg": A Tensor that represents the per-pixel segmentation prediced by the head. The prediction has shape KxHxW that represents the logits of each class for each pixel. * "panoptic_seg": A tuple that represent panoptic output panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict]): Describe each segment in `panoptic_seg`. Each dict contains keys "id", "category_id", "isthing". 
""" images = [] for video in batched_inputs: for frame in video["image"]: images.append(frame.to(self.device)) images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.size_divisibility) if not self.training and self.window_inference: outputs = self.run_window_inference(images.tensor, window_size=3) else: features = self.backbone(images.tensor) outputs = self.sem_seg_head(features) if self.training: # mask classification target targets = self.prepare_targets(batched_inputs, images) outputs, targets = self.frame_decoder_loss_reshape(outputs, targets) # bipartite matching-based loss losses = self.criterion(outputs, targets) for k in list(losses.keys()): if k in self.criterion.weight_dict: losses[k] *= self.criterion.weight_dict[k] else: # remove this loss if not specified in `weight_dict` losses.pop(k) return losses else: outputs = self.post_processing(outputs) mask_cls_results = outputs["pred_logits"] mask_pred_results = outputs["pred_masks"] mask_cls_result = mask_cls_results[0] mask_pred_result = mask_pred_results[0] first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1]) input_per_image = batched_inputs[0] image_size = images.image_sizes[0] # image size without padding after data augmentation height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation width = input_per_image.get("width", image_size[1]) return retry_if_cuda_oom(self.inference_video)( mask_cls_result, mask_pred_result, image_size, height, width, first_resize_size) def frame_decoder_loss_reshape(self, outputs, targets): outputs['pred_masks'] = einops.rearrange(outputs['pred_masks'], 'b q t h w -> (b t) q () h w') outputs['pred_logits'] = einops.rearrange(outputs['pred_logits'], 'b t q c -> (b t) q c') if 'aux_outputs' in outputs: for i in range(len(outputs['aux_outputs'])): outputs['aux_outputs'][i]['pred_masks'] = einops.rearrange( outputs['aux_outputs'][i]['pred_masks'], 'b q t h w -> (b t) q () h w' ) outputs['aux_outputs'][i]['pred_logits'] = einops.rearrange( outputs['aux_outputs'][i]['pred_logits'], 'b t q c -> (b t) q c' ) gt_instances = [] for targets_per_video in targets: num_labeled_frames = targets_per_video['ids'].shape[1] for f in range(num_labeled_frames): labels = targets_per_video['labels'] ids = targets_per_video['ids'][:, [f]] masks = targets_per_video['masks'][:, [f], :, :] gt_instances.append({"labels": labels, "ids": ids, "masks": masks}) return outputs, gt_instances def match_from_embds(self, tgt_embds, cur_embds): cur_embds = cur_embds / cur_embds.norm(dim=1)[:, None] tgt_embds = tgt_embds / tgt_embds.norm(dim=1)[:, None] cos_sim = torch.mm(cur_embds, tgt_embds.transpose(0, 1)) cost_embd = 1 - cos_sim C = 1.0 * cost_embd C = C.cpu() indices = linear_sum_assignment(C.transpose(0, 1)) # target x current indices = indices[1] # permutation that makes current aligns to target return indices def post_processing(self, outputs): pred_logits, pred_masks, pred_embds = outputs['pred_logits'], outputs['pred_masks'], outputs['pred_embds'] pred_logits = pred_logits[0] pred_masks = einops.rearrange(pred_masks[0], 'q t h w -> t q h w') pred_embds = einops.rearrange(pred_embds[0], 'c t q -> t q c') pred_logits = list(torch.unbind(pred_logits)) pred_masks = list(torch.unbind(pred_masks)) pred_embds = list(torch.unbind(pred_embds)) out_logits = [] out_masks = [] out_embds = [] out_logits.append(pred_logits[0]) out_masks.append(pred_masks[0]) out_embds.append(pred_embds[0]) # match the instances frame by frame for i 
in range(1, len(pred_logits)): indices = self.match_from_embds(out_embds[-1], pred_embds[i]) out_logits.append(pred_logits[i][indices, :]) out_masks.append(pred_masks[i][indices, :, :]) out_embds.append(pred_embds[i][indices, :]) out_logits = sum(out_logits)/len(out_logits) out_masks = torch.stack(out_masks, dim=1) # q h w -> q t h w out_logits = out_logits.unsqueeze(0) out_masks = out_masks.unsqueeze(0) outputs['pred_logits'] = out_logits outputs['pred_masks'] = out_masks return outputs def run_window_inference(self, images_tensor, window_size=30): iters = len(images_tensor) // window_size if len(images_tensor) % window_size != 0: iters += 1 out_list = [] for i in range(iters): start_idx = i * window_size end_idx = (i+1) * window_size features = self.backbone(images_tensor[start_idx:end_idx]) out = self.sem_seg_head(features) del features['res2'], features['res3'], features['res4'], features['res5'] for j in range(len(out['aux_outputs'])): del out['aux_outputs'][j]['pred_masks'], out['aux_outputs'][j]['pred_logits'] out['pred_masks'] = out['pred_masks'].detach().cpu().to(torch.float32) out_list.append(out) # merge outputs outputs = {} outputs['pred_logits'] = torch.cat([x['pred_logits'] for x in out_list], dim=1).detach() outputs['pred_masks'] = torch.cat([x['pred_masks'] for x in out_list], dim=2).detach() outputs['pred_embds'] = torch.cat([x['pred_embds'] for x in out_list], dim=2).detach() return outputs def prepare_targets(self, targets, images): h_pad, w_pad = images.tensor.shape[-2:] gt_instances = [] for targets_per_video in targets: _num_instance = len(targets_per_video["instances"][0]) mask_shape = [_num_instance, self.num_frames, h_pad, w_pad] gt_masks_per_video = torch.zeros(mask_shape, dtype=torch.bool, device=self.device) gt_ids_per_video = [] gt_classes_per_video = [] for f_i, targets_per_frame in enumerate(targets_per_video["instances"]): targets_per_frame = targets_per_frame.to(self.device) h, w = targets_per_frame.image_size gt_ids_per_video.append(targets_per_frame.gt_ids[:, None]) gt_classes_per_video.append(targets_per_frame.gt_classes[:, None]) if isinstance(targets_per_frame.gt_masks, BitMasks): gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks.tensor else: # polygon gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks gt_ids_per_video = torch.cat(gt_ids_per_video, dim=1) gt_classes_per_video = torch.cat(gt_classes_per_video, dim=1).max(dim=1)[0] valid_idx = (gt_ids_per_video != -1).any(dim=-1) gt_classes_per_video = gt_classes_per_video[valid_idx] # N, gt_ids_per_video = gt_ids_per_video[valid_idx] # N, num_frames gt_instances.append({"labels": gt_classes_per_video, "ids": gt_ids_per_video}) gt_masks_per_video = gt_masks_per_video[valid_idx].float() # N, num_frames, H, W gt_instances[-1].update({"masks": gt_masks_per_video}) return gt_instances def inference_video(self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size): if len(pred_cls) > 0: scores = F.softmax(pred_cls, dim=-1)[:, :-1] labels = torch.arange( self.sem_seg_head.num_classes, device=self.device ).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) # keep top-10 predictions scores_per_image, topk_indices = scores.flatten(0, 1).topk(10, sorted=False) labels_per_image = labels[topk_indices] topk_indices = topk_indices // self.sem_seg_head.num_classes pred_masks = pred_masks[topk_indices] pred_masks = F.interpolate( pred_masks, size=first_resize_size, mode="bilinear", align_corners=False ) pred_masks = pred_masks[:, :, : img_size[0], : img_size[1]] 
pred_masks = F.interpolate( pred_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) masks = pred_masks > 0. out_scores = scores_per_image.tolist() out_labels = labels_per_image.tolist() out_masks = [m for m in masks.cpu()] else: out_scores = [] out_labels = [] out_masks = [] video_output = { "image_size": (output_height, output_width), "pred_scores": out_scores, "pred_labels": out_labels, "pred_masks": out_masks, } return video_output @META_ARCH_REGISTRY.register() class DVIS_Plus_online(MinVIS): """ Online version of DVIS, including a segmenter and a referring tracker. """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, metadata, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video tracker, num_frames, window_inference, max_num, max_iter_num, window_size, task, # use_cl use_cl, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image # video tracker: a tracker module, e.g. ReferringTracker num_frames: number of frames sampled during training window_inference: if the GPU memory is insufficient to predict the entire video at once, inference needs to be performed clip by clip num_class: the categories number of the dataset max_num: the maximum number of instances retained for a video, only used in VIS max_iter_num: the iter nums window_size: the number of images processed by the segmenter at a time task: VIS, VSS or VPS """ super().__init__( backbone=backbone, sem_seg_head=sem_seg_head, criterion=criterion, num_queries=num_queries, object_mask_threshold=object_mask_threshold, overlap_threshold=overlap_threshold, metadata=metadata, size_divisibility=size_divisibility, sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference, pixel_mean=pixel_mean, pixel_std=pixel_std, # video num_frames=num_frames, window_inference=window_inference, ) # frozen the segmenter for p in self.backbone.parameters(): p.requires_grad_(False) for p in self.sem_seg_head.parameters(): p.requires_grad_(False) self.tracker = tracker self.max_num = max_num self.iter = 0 self.max_iter_num = max_iter_num self.window_size = window_size self.task = task assert self.task in ['vis', 'vss', 'vps'], "Only support vis, vss and vps !" 
inference_dict = { 'vis': self.inference_video_vis, 'vss': self.inference_video_vss, 'vps': self.inference_video_vps, } self.inference_video_task = inference_dict[self.task] self.use_cl = use_cl @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion matcher = VideoHungarianMatcher_Consistent( cost_class=class_weight, cost_mask=mask_weight, cost_dice=dice_weight, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, frames=cfg.INPUT.SAMPLING_FRAME_NUM ) weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight} if deep_supervision: dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS aux_weight_dict = {} for i in range(dec_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) if cfg.MODEL.TRACKER.USE_CL: weight_dict.update({'loss_reid': 2}) losses = ["labels", "masks"] criterion = VideoSetCriterion( sem_seg_head.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO, ) if cfg.MODEL.MASK_FORMER.REID_BRANCH: hidden_channel = cfg.MODEL.MASK_FORMER.HIDDEN_DIM * 2 else: hidden_channel = cfg.MODEL.MASK_FORMER.HIDDEN_DIM tracker = ReferringTracker_noiser( hidden_channel=hidden_channel, feedforward_channel=cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD, num_head=cfg.MODEL.MASK_FORMER.NHEADS, decoder_layer_num=cfg.MODEL.TRACKER.DECODER_LAYERS, noise_mode=cfg.MODEL.TRACKER.NOISE_MODE, noise_ratio=cfg.MODEL.TRACKER.NOISE_RATIO, mask_dim=cfg.MODEL.MASK_FORMER.HIDDEN_DIM, class_num=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, ) max_iter_num = cfg.SOLVER.MAX_ITER return { "backbone": backbone, "sem_seg_head": sem_seg_head, "criterion": criterion, "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES, "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD, "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD, "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY, "sem_seg_postprocess_before_inference": True, "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, # video "tracker": tracker, "num_frames": cfg.INPUT.SAMPLING_FRAME_NUM, "window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE, "max_num": cfg.MODEL.MASK_FORMER.TEST.MAX_NUM, "max_iter_num": max_iter_num, "window_size": cfg.MODEL.MASK_FORMER.TEST.WINDOW_SIZE, "task": cfg.MODEL.MASK_FORMER.TEST.TASK, "use_cl": cfg.MODEL.REFINER.USE_CL, } def forward(self, batched_inputs): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper`. Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * "image": Tensor, image in (C, H, W) format. * "instances": per-region ground truth * Other information that's included in the original dicts, such as: "height", "width" (int): the output resolution of the model (may be different from input resolution), used in inference. 
Returns: dict: For specific task, the dict contains the following keys: * For VIS: "image_size": (output_height, output_width). "pred_scores": score for per instance. "pred_labels": class for per instance. "pred_masks": list[Tensor], bit-masks for per instance, Tensor shape is (t, h, w). "pred_ids": list, query ids for per instance, list length is N. "task": "vis", * For VSS: "image_size": (output_height, output_width). "pred_masks": A Tensor that represents the per-pixel segmentation prediced by the head. The prediction has shape (t, h, w) that represents the category ID for each pixel. "task": "vss". * For VPS: "image_size": (output_height, output_width). "pred_masks": Tensor, shape is (t, h, w), that represents the unique ID for the object which each pixel belong to. "segments_infos": list[dict], info dicts for per object. Info dict including unique ID, category ID and isthing. "pred_ids": list, query ids for per thing and stuff, list length is N. "task": "vps". """ # for running demo on very long videos if 'keep' in batched_inputs[0].keys(): self.keep = batched_inputs[0]['keep'] else: self.keep = False images = [] for video in batched_inputs: for frame in video["image"]: images.append(frame.to(self.device)) images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.size_divisibility) if not self.training and self.window_inference: outputs = self.run_window_inference(images.tensor, window_size=self.window_size) else: self.backbone.eval() self.sem_seg_head.eval() with torch.no_grad(): features = self.backbone(images.tensor) image_outputs = self.sem_seg_head(features) object_labels = self._get_instance_labels(image_outputs['pred_logits']) frame_embds = image_outputs['pred_embds'].clone().detach() # (b, c, t, q) frame_embds_no_norm = image_outputs['pred_embds_without_norm'].clone().detach() # (b, c, t, q) mask_features = image_outputs['mask_features'].clone().detach().unsqueeze(0) del image_outputs['mask_features'] torch.cuda.empty_cache() outputs, indices = self.tracker(frame_embds, mask_features, return_indices=True, resume=self.keep, frame_classes=object_labels, frame_embeds_no_norm=frame_embds_no_norm) image_outputs = self.reset_image_output_order(image_outputs, indices) if self.training: targets = self.prepare_targets(batched_inputs, images) # use the segmenter prediction results to guide the matching process during early training phase image_outputs, outputs, targets = self.frame_decoder_loss_reshape( outputs, targets, image_outputs=image_outputs ) if self.iter < self.max_iter_num // 2: losses, reference_match_result = self.criterion(outputs, targets, matcher_outputs=image_outputs, ret_match_result=True) else: losses, reference_match_result = self.criterion(outputs, targets, matcher_outputs=None, ret_match_result=True) if self.use_cl: losses_cl = self.get_cl_loss_ref(outputs, reference_match_result) losses.update(losses_cl) self.iter += 1 for k in list(losses.keys()): if k in self.criterion.weight_dict: losses[k] *= self.criterion.weight_dict[k] else: # remove this loss if not specified in `weight_dict` losses.pop(k) return losses else: outputs = self.post_processing(outputs) mask_cls_results = outputs["pred_logits"] mask_pred_results = outputs["pred_masks"] pred_ids = outputs["ids"] mask_cls_result = mask_cls_results[0] mask_pred_result = mask_pred_results[0] pred_id = pred_ids[0] first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1]) input_per_image = batched_inputs[0] image_size = images.image_sizes[0] # image 
size without padding after data augmentation height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation width = input_per_image.get("width", image_size[1]) return retry_if_cuda_oom(self.inference_video_task)( mask_cls_result, mask_pred_result, image_size, height, width, first_resize_size, pred_id ) def _get_instance_labels(self, pred_logits): # b, t, q, c pred_logits = pred_logits[0] # (t, q, c) scores = F.softmax(pred_logits, dim=-1) labels = torch.argmax(scores, dim=2) # (t, q) labels[labels == pred_logits.size(2) - 1] = -1 return labels def frame_decoder_loss_reshape(self, outputs, targets, image_outputs=None): outputs['pred_masks'] = einops.rearrange(outputs['pred_masks'], 'b q t h w -> (b t) q () h w') outputs['pred_logits'] = einops.rearrange(outputs['pred_logits'], 'b t q c -> (b t) q c') outputs['pred_references'] = einops.rearrange(outputs['pred_references'], 'b c t q -> (b t) q c') if image_outputs is not None: image_outputs['pred_masks'] = einops.rearrange(image_outputs['pred_masks'], 'b q t h w -> (b t) q () h w') image_outputs['pred_logits'] = einops.rearrange(image_outputs['pred_logits'], 'b t q c -> (b t) q c') if 'aux_outputs' in outputs: for i in range(len(outputs['aux_outputs'])): outputs['aux_outputs'][i]['pred_masks'] = einops.rearrange( outputs['aux_outputs'][i]['pred_masks'], 'b q t h w -> (b t) q () h w' ) outputs['aux_outputs'][i]['pred_logits'] = einops.rearrange( outputs['aux_outputs'][i]['pred_logits'], 'b t q c -> (b t) q c' ) gt_instances = [] for targets_per_video in targets: num_labeled_frames = targets_per_video['ids'].shape[1] for f in range(num_labeled_frames): labels = targets_per_video['labels'] ids = targets_per_video['ids'][:, [f]] masks = targets_per_video['masks'][:, [f], :, :] gt_instances.append({"labels": labels, "ids": ids, "masks": masks}) return image_outputs, outputs, gt_instances def reset_image_output_order(self, output, indices): """ in order to maintain consistency between the initial query and the guided results (segmenter prediction) :param output: segmenter prediction results (image-level segmentation results) :param indices: matched indicates :return: reordered outputs """ # pred_keys, (b, c, t, q) indices = torch.Tensor(indices).to(torch.int64) # (t, q) frame_indices = torch.range(0, indices.shape[0] - 1).to(indices).unsqueeze(1).repeat(1, indices.shape[1]) # pred_masks, shape is (b, q, t, h, w) output['pred_masks'][0] = output['pred_masks'][0][indices, frame_indices].transpose(0, 1) # pred logits, shape is (b, t, q, c) output['pred_logits'][0] = output['pred_logits'][0][frame_indices, indices] return output def post_processing(self, outputs, aux_logits=None): """ average the class logits and append query ids """ pred_logits = outputs['pred_logits'] pred_logits = pred_logits[0] # (t, q, c) out_logits = torch.mean(pred_logits, dim=0).unsqueeze(0) if aux_logits is not None: aux_logits = aux_logits[0] aux_logits = torch.mean(aux_logits, dim=0) # (q, c) outputs['pred_logits'] = out_logits outputs['ids'] = [torch.arange(0, outputs['pred_masks'].size(1))] if aux_logits is not None: return outputs, aux_logits return outputs def run_window_inference(self, images_tensor, window_size=30): iters = len(images_tensor) // window_size if len(images_tensor) % window_size != 0: iters += 1 out_list = [] for i in range(iters): start_idx = i * window_size end_idx = (i+1) * window_size # segmeter inference features = self.backbone(images_tensor[start_idx:end_idx]) out = self.sem_seg_head(features) # remove unnecessary 
variables to save GPU memory del features['res2'], features['res3'], features['res4'], features['res5'] for j in range(len(out['aux_outputs'])): del out['aux_outputs'][j]['pred_masks'], out['aux_outputs'][j]['pred_logits'] # referring tracker inference frame_embds = out['pred_embds'] # (b, c, t, q) frame_embds_no_norm = out['pred_embds_without_norm'] mask_features = out['mask_features'].unsqueeze(0) if i != 0 or self.keep: track_out = self.tracker(frame_embds, mask_features, resume=True, frame_embeds_no_norm=frame_embds_no_norm) else: track_out = self.tracker(frame_embds, mask_features, frame_embeds_no_norm=frame_embds_no_norm) # remove unnecessary variables to save GPU memory del mask_features for j in range(len(track_out['aux_outputs'])): del track_out['aux_outputs'][j]['pred_masks'], track_out['aux_outputs'][j]['pred_logits'] track_out['pred_logits'] = track_out['pred_logits'].to(torch.float32).detach().cpu() track_out['pred_masks'] = track_out['pred_masks'].to(torch.float32).detach().cpu() track_out['pred_embds'] = track_out['pred_embds'].to(torch.float32).detach().cpu() # track_out['pred_logits'] = track_out['pred_logits'].detach() # track_out['pred_masks'] = track_out['pred_masks'].detach() # track_out['pred_embds'] = track_out['pred_embds'].detach() out_list.append(track_out) # merge outputs outputs = {} outputs['pred_logits'] = torch.cat([x['pred_logits'] for x in out_list], dim=1) outputs['pred_masks'] = torch.cat([x['pred_masks'] for x in out_list], dim=2) outputs['pred_embds'] = torch.cat([x['pred_embds'] for x in out_list], dim=2) return outputs def inference_video_vis( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, pred_id, aux_pred_cls=None, ): if len(pred_cls) > 0: scores = F.softmax(pred_cls, dim=-1)[:, :-1] if aux_pred_cls is not None: aux_pred_cls = F.softmax(aux_pred_cls, dim=-1)[:, :-1] scores = torch.maximum(scores, aux_pred_cls.to(scores)) labels = torch.arange( self.sem_seg_head.num_classes, device=self.device ).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) # keep top-K predictions scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.max_num, sorted=False) labels_per_image = labels[topk_indices] topk_indices = topk_indices // self.sem_seg_head.num_classes pred_masks = pred_masks[topk_indices] pred_ids = pred_id[topk_indices] # interpolation to original image size pred_masks = F.interpolate( pred_masks, size=first_resize_size, mode="bilinear", align_corners=False ) pred_masks = pred_masks[:, :, : img_size[0], : img_size[1]] pred_masks = F.interpolate( pred_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) masks = pred_masks > 0. 
del pred_masks out_scores = scores_per_image.tolist() out_labels = labels_per_image.tolist() out_ids = pred_ids.tolist() out_masks = [m for m in masks.cpu()] else: out_scores = [] out_labels = [] out_masks = [] out_ids = [] video_output = { "image_size": (output_height, output_width), "pred_scores": out_scores, "pred_labels": out_labels, "pred_masks": out_masks, "pred_ids": out_ids, "task": "vis", } return video_output def inference_video_vps( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, pred_id, aux_pred_cls=None, ): pred_cls = F.softmax(pred_cls, dim=-1) if aux_pred_cls is not None: aux_pred_cls = F.softmax(aux_pred_cls, dim=-1)[:, :-1] pred_cls[:, :-1] = torch.maximum(pred_cls[:, :-1], aux_pred_cls.to(pred_cls)) mask_pred = pred_masks scores, labels = pred_cls.max(-1) # filter out the background prediction keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_ids = pred_id[keep] cur_masks = mask_pred[keep] # interpolation to original image size cur_masks = F.interpolate( cur_masks, size=first_resize_size, mode="bilinear", align_corners=False ) cur_masks = cur_masks[:, :, :img_size[0], :img_size[1]].sigmoid() cur_masks = F.interpolate( cur_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) cur_prob_masks = cur_scores.view(-1, 1, 1, 1).to(cur_masks.device) * cur_masks # initial panoptic_seg and segments infos h, w = cur_masks.shape[-2:] panoptic_seg = torch.zeros((cur_masks.size(1), h, w), dtype=torch.int32, device=cur_masks.device) segments_infos = [] out_ids = [] current_segment_id = 0 if cur_masks.shape[0] == 0: # We didn't detect any mask return { "image_size": (output_height, output_width), "pred_masks": panoptic_seg.cpu(), "segments_infos": segments_infos, "pred_ids": out_ids, "task": "vps", } else: # take argmax cur_mask_ids = cur_prob_masks.argmax(0) # (t, h, w) stuff_memory_list = {} for k in range(cur_classes.shape[0]): pred_class = cur_classes[k].item() isthing = pred_class < len(self.metadata.thing_dataset_id_to_contiguous_id) # filter out the unstable segmentation results mask_area = (cur_mask_ids == k).sum().item() original_area = (cur_masks[k] >= 0.5).sum().item() mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5) if mask_area > 0 and original_area > 0 and mask.sum().item() > 0: if mask_area / original_area < self.overlap_threshold: continue # merge stuff regions if not isthing: if int(pred_class) in stuff_memory_list.keys(): panoptic_seg[mask] = stuff_memory_list[int(pred_class)] continue else: stuff_memory_list[int(pred_class)] = current_segment_id + 1 current_segment_id += 1 panoptic_seg[mask] = current_segment_id segments_infos.append( { "id": current_segment_id, "isthing": bool(isthing), "category_id": int(pred_class), } ) out_ids.append(cur_ids[k]) return { "image_size": (output_height, output_width), "pred_masks": panoptic_seg.cpu(), "segments_infos": segments_infos, "pred_ids": out_ids, "task": "vps", } def inference_video_vss( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, pred_id, aux_pred_cls=None, ): mask_cls = F.softmax(pred_cls, dim=-1)[..., :-1] if aux_pred_cls is not None: aux_pred_cls = F.softmax(aux_pred_cls, dim=-1)[..., :-1] mask_cls = torch.maximum(mask_cls, aux_pred_cls.to(mask_cls)) mask_pred = pred_masks # interpolation to original image size cur_masks = F.interpolate( mask_pred, size=first_resize_size, mode="bilinear", align_corners=False ) cur_masks 
= cur_masks[:, :, :img_size[0], :img_size[1]].sigmoid() cur_masks = F.interpolate( cur_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) semseg = torch.einsum("qc,qthw->cthw", mask_cls, cur_masks) sem_score, sem_mask = semseg.max(0) sem_mask = sem_mask return { "image_size": (output_height, output_width), "pred_masks": sem_mask.cpu(), "task": "vss", } def get_cl_loss_ref(self, outputs, referecne_match_result): references = outputs['pred_references'] # t q c # per frame contrastive_items = [] for i in range(references.size(0)): if i == 0: continue frame_reference = references[i] # (q, c) frame_reference_ = references[i - 1] # (q, c) if i != references.size(0) - 1: frame_reference_next = references[i + 1] else: frame_reference_next = None frame_ref_gt_indices = referecne_match_result[i] gt2ref = {} for i_ref, i_gt in zip(frame_ref_gt_indices[0], frame_ref_gt_indices[1]): gt2ref[i_gt.item()] = i_ref.item() # per instance for i_gt in gt2ref.keys(): i_ref = gt2ref[i_gt] anchor_embeds = frame_reference[[i_ref]] pos_embeds = frame_reference_[[i_ref]] neg_range = list(range(0, i_ref)) + list(range(i_ref + 1, frame_reference.size(0))) neg_embeds = frame_reference_[neg_range] num_positive = pos_embeds.shape[0] # concate pos and neg to get whole constractive samples pos_neg_embedding = torch.cat( [pos_embeds, neg_embeds], dim=0) # generate label, pos is 1, neg is 0 pos_neg_label = pos_neg_embedding.new_zeros((pos_neg_embedding.shape[0],), dtype=torch.int64) # noqa pos_neg_label[:num_positive] = 1. # dot product dot_product = torch.einsum( 'ac,kc->ak', [pos_neg_embedding, anchor_embeds]) aux_normalize_pos_neg_embedding = nn.functional.normalize( pos_neg_embedding, dim=1) aux_normalize_anchor_embedding = nn.functional.normalize( anchor_embeds, dim=1) aux_cosine_similarity = torch.einsum('ac,kc->ak', [aux_normalize_pos_neg_embedding, aux_normalize_anchor_embedding]) contrastive_items.append({ 'dot_product': dot_product, 'cosine_similarity': aux_cosine_similarity, 'label': pos_neg_label}) if frame_reference_next is not None: pos_embeds = frame_reference_next[[i_ref]] neg_range = list(range(0, i_ref)) + list(range(i_ref + 1, frame_reference.size(0))) neg_embeds = frame_reference_next[neg_range] num_positive = pos_embeds.shape[0] # concate pos and neg to get whole constractive samples pos_neg_embedding = torch.cat( [pos_embeds, neg_embeds], dim=0) # generate label, pos is 1, neg is 0 pos_neg_label = pos_neg_embedding.new_zeros((pos_neg_embedding.shape[0],), dtype=torch.int64) # noqa pos_neg_label[:num_positive] = 1. # dot product dot_product = torch.einsum( 'ac,kc->ak', [pos_neg_embedding, anchor_embeds]) aux_normalize_pos_neg_embedding = nn.functional.normalize( pos_neg_embedding, dim=1) aux_normalize_anchor_embedding = nn.functional.normalize( anchor_embeds, dim=1) aux_cosine_similarity = torch.einsum('ac,kc->ak', [aux_normalize_pos_neg_embedding, aux_normalize_anchor_embedding]) contrastive_items.append({ 'dot_product': dot_product, 'cosine_similarity': aux_cosine_similarity, 'label': pos_neg_label})
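The `get_cl_loss_ref` loop above pairs each tracked instance's reference embedding (anchor) with the same query index in a neighbouring frame (positive) and with every other query (negatives), storing raw dot products, cosine similarities, and 0/1 labels; the gold next line below then hands those `contrastive_items` to `loss_reid`. The sketch below reproduces that item construction on dummy tensors and scores it with a plain binary cross-entropy over the dot products; `loss_reid` itself is not shown in this record, so the loss at the end is an illustrative assumption rather than the repository's implementation.

import torch
import torch.nn.functional as F

def build_contrastive_item(curr_refs, prev_refs, i_ref):
    # Anchor from the current frame; positive = same query index in the previous
    # frame; negatives = every other query in the previous frame.
    anchor = curr_refs[[i_ref]]                                              # (1, C)
    neg_idx = [j for j in range(prev_refs.size(0)) if j != i_ref]
    pos_neg = torch.cat([prev_refs[[i_ref]], prev_refs[neg_idx]], dim=0)     # (Q, C)
    label = pos_neg.new_zeros(pos_neg.size(0), dtype=torch.int64)
    label[0] = 1                                                             # first row is the positive
    dot = torch.einsum('ac,kc->ak', pos_neg, anchor)                         # (Q, 1) raw similarities
    cos = torch.einsum('ac,kc->ak',
                       F.normalize(pos_neg, dim=1),
                       F.normalize(anchor, dim=1))                           # (Q, 1) cosine similarities
    return {'dot_product': dot, 'cosine_similarity': cos, 'label': label}

torch.manual_seed(0)
curr, prev = torch.randn(5, 8), torch.randn(5, 8)                            # Q=5 queries, C=8 dims
item = build_contrastive_item(curr, prev, i_ref=2)

# Illustrative reid loss (assumption, not the repo's loss_reid): BCE on the dot products.
loss = F.binary_cross_entropy_with_logits(item['dot_product'].squeeze(1),
                                          item['label'].float())
print(loss.item())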
losses = loss_reid(contrastive_items, outputs)
6
2023-11-14 10:55:11+00:00
16k
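The MinVIS/DVIS record that ends above tracks instances without a dedicated tracking head: `match_from_embds` normalizes the query embeddings of two frames, builds a cosine-distance cost matrix, and solves it with the Hungarian algorithm, after which `post_processing` reorders each frame's logits and masks by the returned permutation. A minimal, self-contained sketch of that matching step, using dummy embeddings and `scipy.optimize.linear_sum_assignment` as in the record:

import torch
from scipy.optimize import linear_sum_assignment

def match_from_embds(tgt_embds, cur_embds):
    # Permutation that re-orders current-frame queries to follow the target frame.
    cur = cur_embds / cur_embds.norm(dim=1, keepdim=True)
    tgt = tgt_embds / tgt_embds.norm(dim=1, keepdim=True)
    cost = 1 - cur @ tgt.T                               # (Q_cur, Q_tgt) cosine distance
    row, col = linear_sum_assignment(cost.T.numpy())     # rows index target, cols index current
    return torch.as_tensor(col)

# Toy check: frame t+1 is frame t with its queries shuffled plus a little drift.
torch.manual_seed(0)
frame_t = torch.randn(4, 16)
perm = torch.tensor([2, 0, 3, 1])
frame_t1 = frame_t[perm] + 0.01 * torch.randn(4, 16)

indices = match_from_embds(frame_t, frame_t1)
print(indices)                      # recovers the inverse shuffle: tensor([1, 3, 0, 2])
aligned = frame_t1[indices]         # rows now line up with frame_t's query order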
ej0cl6/TextEE
TextEE/models/Ampere/model_copyutils.py
[ { "identifier": "PrefixGenBartForConditionalGeneration", "path": "TextEE/models/Ampere/prefix_gen_bart.py", "snippet": "class PrefixGenBartForConditionalGeneration(BartPretrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [r\"final_logits_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config: BartConfig):\n super().__init__(config)\n self.model = PrefixGenBartModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(BART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n prefix: Optional[Dict] = None, # Tag: Changed\n attention_mask: Optional[torch.Tensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n Returns:\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if use_cache:\n logger.warning(\"The `use_cache` argument is changed to `False` since `labels` is provided.\")\n use_cache = False\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n outputs = self.model(\n input_ids,\n prefix=prefix,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n # Tag: Changed\n def _prepare_encoder_decoder_kwargs_for_generation(\n self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None\n ) -> Dict[str, Any]:\n # 1. get encoder\n encoder = self.get_encoder()\n\n # 2. prepare encoder args and encoder kwargs from model kwargs\n irrelevant_prefix = [\"decoder_\", \"cross_attn\", \"use_cache\", \"prefix\"]\n encoder_kwargs = {\n argument: value\n for argument, value in model_kwargs.items()\n if not any(argument.startswith(p) for p in irrelevant_prefix)\n }\n\n # 3. make sure that encoder returns `ModelOutput`\n model_input_name = model_input_name if model_input_name is not None else self.main_input_name\n encoder_kwargs[\"return_dict\"] = True\n encoder_kwargs[model_input_name] = inputs_tensor\n model_kwargs[\"encoder_outputs\"]: ModelOutput = encoder(**encoder_kwargs)\n\n return model_kwargs\n\n # Tag: Changed\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs\n ):\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"prefix\": kwargs.get('prefix', None),\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past" }, { "identifier": "AMRBartTokenizer", "path": "TextEE/models/Ampere/AMRBART/AMRBartTokenizer.py", "snippet": "class AMRBartTokenizer(BartTokenizer):\n INIT = 'Ġ'\n \n def __init__(self, vocab_file, merges_file, errors=\"replace\", bos_token=\"<s>\", eos_token=\"</s>\", sep_token=\"</s>\", cls_token=\"<s>\", unk_token=\"<unk>\", pad_token=\"<pad>\", mask_token=\"<mask>\", add_prefix_space=False, **kwargs):\n super().__init__(vocab_file, merges_file, errors, bos_token, eos_token, sep_token, cls_token, unk_token, pad_token, mask_token, add_prefix_space, **kwargs)\n self.modified = 0\n self.recategorizations = set(recategorizations)\n self.patterns = re.compile(r\"\"\" ?<[a-z]+:?\\d*>| ?:[^\\s]+|'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+\"\"\")\n self.remove_pars = False\n \n @classmethod\n def from_pretrained(cls, pretrained_model_path, *args, **kwargs):\n inst = super().from_pretrained(pretrained_model_path, *args, **kwargs)\n inst.init_amr_vocabulary()\n return inst\n \n def init_amr_vocabulary(self):\n self.old_enc_size = old_enc_size = len(self.encoder)\n tokens = [t for t in raw_special_tokens if t not in self.encoder]\n\n for i, t in enumerate(tokens, start=old_enc_size):\n self.encoder[t] = i\n\n self.encoder = {k: i for i, (k,v) in enumerate(sorted(self.encoder.items(), key=lambda x: x[1]))}\n self.decoder = {v: k for k, v in sorted(self.encoder.items(), key=lambda x: x[1])}\n self.modified = len(tokens)\n\n self.amr_bos_token = \"<AMR>\"\n self.amr_bos_token_id = self.encoder[self.amr_bos_token]\n self.amr_eos_token = \"</AMR>\"\n self.amr_eos_token_id = self.encoder[self.amr_eos_token]\n print(f\"Added {self.modified} AMR tokens\")\n \n def _tokenize(self, text):\n \"\"\" Tokenize a string. 
Modified in order to handle sentences with recategorization pointers\"\"\"\n bpe_tokens = []\n for tok_span in text.lstrip().split(' '):\n tok_span = tok_span.strip()\n recats = tok_span.rsplit('_', 1)\n if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:\n bpe_tokens.extend([self.INIT + recats[0], '_' + recats[1]])\n else:\n for token in re.findall(self.pat, ' ' + tok_span):\n token = \"\".join(\n self.byte_encoder[b] for b in token.encode(\"utf-8\")\n ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)\n bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(\" \"))\n\n return bpe_tokens\n\n def _tok_bpe(self, token):\n tokk = []\n tok = token.strip()\n recats = tok.rsplit('_', 1)\n if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:\n tokk.extend([self.INIT + recats[0], '_' + recats[1]])\n else:\n for tok in self.patterns.findall(' ' + token):\n tok = \"\".join(\n self.byte_encoder[b] for b in tok.encode(\"utf-8\"))\n toks = self.bpe(tok).split(' ')\n tokk.extend(toks)\n return tokk\n\n def tokenize_amr(self, amr_tokens):\n bpe_tokens = []\n for i, tokk in enumerate(amr_tokens):\n is_in_enc = self.INIT + tokk in self.encoder\n is_rel = tokk.startswith(':') and len(tokk) > 1\n is_spc = tokk.startswith('<') and tokk.endswith('>')\n is_of = tokk.startswith(':') and tokk.endswith('-of')\n is_frame = re.match(r'.+-\\d\\d', tokk) is not None\n\n if tokk.startswith('\"') and tokk.endswith('\"'): # dealing with examples like \"The_United_Kingdom_of_xxx\"\n tokk = tokk[1:-1].replace('_', ' ')\n bpe_toks = [self.INIT + \"<lit>\"]\n bpe_toks += self._tok_bpe(tokk)\n bpe_toks.append(self.INIT + \"</lit>\")\n\n elif (is_rel or is_spc or is_frame or is_of):\n if is_in_enc:\n bpe_toks = [self.INIT + tokk]\n elif is_frame:\n bpe_toks = self._tok_bpe(tokk[:-3]) + [tokk[-3:]]\n elif is_of:\n rel = tokk[:-3]\n if self.INIT + rel in self.encoder:\n bpe_toks = [self.INIT + rel, '-of']\n else:\n bpe_toks = [self.INIT + ':'] + self._tok_bpe(rel[1:]) + ['-of']\n elif is_rel:\n bpe_toks = [self.INIT + ':'] + self._tok_bpe(tokk[1:])\n else:\n print(\"tok:\", tokk)\n print(f\"is_rel:{is_rel}, is_spc:{is_spc}, is_frame:{is_frame}, is_of:{is_of}\")\n exit()\n raise\n else:\n if is_in_enc:\n bpe_toks = [self.INIT + tokk]\n else:\n bpe_toks = self._tok_bpe(tokk)\n\n bpe_tokens.append(bpe_toks)\n bpe_tokens = [b for bb in bpe_tokens for b in bb]\n bpe_token_ids = [self.encoder.get(b, self.unk_token_id) for b in bpe_tokens]\n return bpe_token_ids\n \n def decode_amr(self, tokens, restore_name_ops=None):\n try:\n nodes, backreferences = postprocessing.decode_into_node_and_backreferences(tokens, self)\n except Exception as e:\n print('Decoding failure:', file=sys.stderr)\n print(e, file=sys.stderr)\n return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)\n try:\n graph_ = graph = self._fix_and_make_graph(nodes)\n # if collapse_name_ops:\n # graph_ = graph = postprocessing._split_name_ops(graph)\n except Exception as e:\n print('Building failure:', file=sys.stderr)\n print(nodes, file=sys.stderr)\n print(backreferences, file=sys.stderr)\n print(e, file=sys.stderr)\n return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)\n try:\n graph, status = postprocessing.connect_graph_if_not_connected(graph)\n if status == postprocessing.ParsedStatus.BACKOFF:\n print('Reconnection 1 failure:')\n print(nodes, file=sys.stderr)\n 
print(backreferences, file=sys.stderr)\n print(graph_, file=sys.stderr)\n return graph, status, (nodes, backreferences)\n except Exception as e:\n print('Reconnction 2 failure:', file=sys.stderr)\n print(e, file=sys.stderr)\n print(nodes, file=sys.stderr)\n print(backreferences, file=sys.stderr)\n print(graph_, file=sys.stderr)\n return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (nodes, backreferences)\n \n def _fix_and_make_graph(self, nodes):\n\n nodes_ = []\n for n in nodes:\n if isinstance(n, str):\n if n.startswith('<') and n.endswith('>') and (not n.startswith('<pointer:')):\n pass\n else:\n nodes_.append(n)\n else:\n nodes_.append(n)\n nodes = nodes_\n\n if True:\n i = 0\n nodes_ = []\n while i < len(nodes):\n nxt = nodes[i]\n pst = None\n if isinstance(nxt, str) and nxt.startswith('<pointer:'):\n e = nxt.find('>')\n if e != len(nxt) -1:\n pst = nxt[e+1:]\n nxt = nxt[:e+1]\n nodes_.append(nxt)\n if pst is not None:\n nodes_.append(pst)\n else:\n nodes_.append(nxt)\n i += 1\n nodes = nodes_\n\n i = 1\n nodes_ = [nodes[0]]\n while i < len(nodes):\n nxt = nodes[i]\n if isinstance(nxt, str) and nxt.startswith('<pointer:'):\n nxt = 'z' + nxt[9:-1]\n fol = nodes[i+1]\n # is not expansion\n if isinstance(fol, str) and (fol.startswith(':') or (fol == ')')):\n nodes_.append(nxt)\n else:\n if self.remove_pars:\n nodes_.append('(')\n else:\n if nodes_[-1] != '(':\n nodes_.append('(')\n #pass\n nodes_.append(nxt)\n nodes_.append('/')\n else:\n nodes_.append(nxt)\n i += 1\n nodes = nodes_\n\n i = 0\n nodes_ = []\n while i < (len(nodes) - 1):\n if nodes[i] == ':':\n nodes_.append(nodes[i] + nodes[i+1])\n i += 2\n last = False\n else:\n nodes_.append(nodes[i])\n i += 1\n last = True\n if last:\n nodes_.append(nodes[-1])\n nodes = nodes_\n\n i = 0\n nodes_ = []\n while i < (len(nodes)):\n if i < 2:\n nodes_.append(nodes[i])\n i += 1\n elif nodes_[-2] == '/' and nodes[i] == '/':\n i += 2\n else:\n nodes_.append(nodes[i])\n i += 1\n nodes = nodes_\n\n i = 0\n newvars = 0\n variables = set()\n remap = {}\n nodes_ = []\n while i < (len(nodes)):\n\n next = nodes[i]\n\n if next == '/':\n last = nodes_[-1]\n if last in variables:\n last_remap = f\"z{newvars+1000}\"\n newvars += 1\n nodes_[-1] = last_remap\n remap[last] = last_remap\n variables.add(last)\n nodes_.append(next)\n\n elif self._classify(next) == 'VAR' and next in remap and (i < len(nodes) - 1) and nodes[i+1] != '/':\n next = remap[next]\n nodes_.append(next)\n\n else:\n nodes_.append(next)\n\n i += 1\n\n nodes = nodes_\n pieces_ = []\n open_cnt = 0\n closed_cnt = 0\n if nodes[0] != '(':\n pieces_.append('(')\n open_cnt += 1\n for p in nodes:\n if p == '(':\n open_cnt += 1\n elif p == ')':\n closed_cnt += 1\n pieces_.append(p)\n if open_cnt == closed_cnt:\n break\n nodes = pieces_ + [')'] * (open_cnt - closed_cnt)\n\n pieces = []\n for piece in nodes:\n if not pieces:\n pieces.append('(')\n else:\n piece = str(piece)\n if piece.startswith('\"') or piece.startswith('\"') or '\"' in piece.strip('\"'):\n piece = '\"' + piece.replace('\"', '') + '\"'\n\n prev = self._classify(pieces[-1])\n next = self._classify(piece)\n\n if next == 'CONST':\n quote = False\n for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\\\', '_', '='):\n if char in piece:\n quote = True\n break\n if quote:\n piece = '\"' + piece.strip('\"') + '\"'\n\n if prev == '(':\n if next in ('VAR', 'I'):\n pieces.append(piece)\n elif prev == ')':\n if next in (')', 'EDGE', 'MODE'):\n pieces.append(piece)\n elif prev == 'VAR':\n if next in ('/', 'EDGE', 'MODE', 
')'):\n pieces.append(piece)\n elif prev == '/':\n if next in ('INST', 'I'):\n pieces.append(piece)\n elif prev == 'INST':\n if next in (')', 'EDGE', 'MODE'):\n pieces.append(piece)\n elif prev == 'I':\n if next in ('/', ')', 'EDGE', 'MODE'):\n pieces.append(piece)\n elif prev == 'EDGE':\n if next in ('(', 'VAR', 'CONST', 'I'):\n pieces.append(piece)\n elif next == ')':\n pieces[-1] = piece\n elif next in ('EDGE', 'MODE'):\n pieces[-1] = piece\n elif prev == 'MODE':\n if next == 'INST':\n pieces.append(piece)\n elif prev == 'CONST':\n if next in (')', 'EDGE', 'MODE'):\n pieces.append(piece)\n\n pieces_ = []\n open_cnt = 0\n closed_cnt = 0\n if pieces[0] != '(':\n pieces_.append('(')\n open_cnt += 1\n for p in pieces:\n if p == '(':\n open_cnt += 1\n elif p == ')':\n closed_cnt += 1\n pieces_.append(p)\n if open_cnt == closed_cnt:\n break\n pieces = pieces_ + [')'] * (open_cnt - closed_cnt)\n\n linearized = re.sub(r'\\s+', ' ', ' '.join(pieces)).strip()\n\n \"\"\"\n line = linearized\n # make sure parentheses match\n # copied from https://github.com/RikVN/AMR/blob/master/restoreAMR/restore_amr.py\n open_count = 0\n close_count = 0\n for i, c in enumerate(line):\n if c == '(':\n open_count += 1\n elif c == ')':\n close_count += 1\n if open_count == close_count and open_count > 0:\n line = line[:i].strip()\n break\n old_line = line\n while True:\n open_count = len(re.findall(r'\\(', line))\n close_count = len(re.findall(r'\\)', line))\n if open_count > close_count:\n line += ')' * (open_count - close_count)\n elif close_count > open_count:\n for i in range(close_count - open_count):\n line = line.rstrip(')')\n line = line.rstrip(' ')\n if old_line == line:\n break\n old_line = line\n \"\"\"\n\n graph = penman.decode(linearized + ' ')\n triples = []\n newvars = 2000\n for triple in graph.triples:\n x, rel, y = triple\n if x is None:\n pass\n elif rel == ':instance' and y is None:\n triples.append(penman.Triple(x, rel, 'thing'))\n elif y is None:\n var = f'z{newvars}'\n newvars += 1\n triples.append(penman.Triple(x, rel, var))\n triples.append(penman.Triple(var, ':instance', 'thing'))\n else:\n triples.append(triple)\n graph = penman.Graph(triples)\n linearized = encode(graph)\n\n def fix_text(linearized=linearized):\n n = 0\n def _repl1(match):\n nonlocal n\n out = match.group(1) + match.group(2) + str(3000 + n) + ' / ' + match.group(2) + match.group(3)\n n += 1\n return out\n linearized = re.sub(r'(\\(\\s?)([a-z])([^\\/:\\)]+[:\\)])', _repl1, linearized,\n flags=re.IGNORECASE | re.MULTILINE)\n\n def _repl2(match):\n return match.group(1)\n linearized = re.sub(r'(\\(\\s*[a-z][\\d+]\\s*\\/\\s*[^\\s\\)\\(:\\/]+\\s*)((?:/\\s*[^\\s\\)\\(:\\/]+\\s*)+)', _repl2,\n linearized,\n flags=re.IGNORECASE | re.MULTILINE)\n\n # adds a ':' to args w/o it\n linearized = re.sub(r'([^:])(ARG)', r'\\1 :\\2', linearized)\n\n # removes edges with no node\n # linearized = re.sub(r':[^\\s\\)\\(:\\/]+?\\s*\\)', ')', linearized, flags=re.MULTILINE)\n\n return linearized\n\n linearized = fix_text(linearized)\n g = penman.decode(linearized)\n return g\n \n def _classify(self, node):\n if not isinstance(node, str):\n return \"CONST\"\n elif node == 'i':\n return \"I\"\n elif re.match(r'^[a-z]\\d*$', node) is not None:\n return \"VAR\"\n elif node[0].isdigit():\n return \n elif node.startswith('\"') and node.endswith('\"'):\n return \"CONST\"\n elif node in ('+', '-'):\n return \"CONST\"\n elif node == ':mode':\n return 'MODE'\n elif node.startswith(':'):\n return \"EDGE\"\n elif node in ['/', '(', ')']:\n return node\n 
elif node[0].isalpha():\n for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\\\'):\n if char in node:\n return \"CONST\"\n return \"INST\"\n else:\n return 'CONST'" }, { "identifier": "AMRRobertaTokenizer", "path": "TextEE/models/Ampere/AMRBART/AMRBartTokenizer.py", "snippet": "class AMRRobertaTokenizer(RobertaTokenizer):\n INIT = 'Ġ'\n \n def __init__(self, vocab_file, merges_file, errors=\"replace\", bos_token=\"<s>\", eos_token=\"</s>\", sep_token=\"</s>\", cls_token=\"<s>\", unk_token=\"<unk>\", pad_token=\"<pad>\", mask_token=\"<mask>\", add_prefix_space=False, **kwargs):\n super().__init__(vocab_file, merges_file, errors, bos_token, eos_token, sep_token, cls_token, unk_token, pad_token, mask_token, add_prefix_space, **kwargs)\n self.modified = 0\n self.recategorizations = set(recategorizations)\n self.patterns = re.compile(r\"\"\" ?<[a-z]+:?\\d*>| ?:[^\\s]+|'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+\"\"\")\n self.remove_pars = False\n \n @classmethod\n def from_pretrained(cls, pretrained_model_path, *args, **kwargs):\n inst = super().from_pretrained(pretrained_model_path, *args, **kwargs)\n inst.init_amr_vocabulary()\n return inst\n \n def init_amr_vocabulary(self):\n self.old_enc_size = old_enc_size = len(self.encoder)\n tokens = [t for t in raw_special_tokens_roberta if t not in self.encoder]\n\n for i, t in enumerate(tokens, start=old_enc_size):\n self.encoder[t] = i\n\n self.encoder = {k: i for i, (k,v) in enumerate(sorted(self.encoder.items(), key=lambda x: x[1]))}\n self.decoder = {v: k for k, v in sorted(self.encoder.items(), key=lambda x: x[1])}\n self.modified = len(tokens)\n\n self.amr_bos_token = \"<AMR>\"\n self.amr_bos_token_id = self.encoder[self.amr_bos_token]\n self.amr_eos_token = \"</AMR>\"\n self.amr_eos_token_id = self.encoder[self.amr_eos_token]\n print(f\"Added {self.modified} AMR tokens\")\n \n def _tokenize(self, text):\n \"\"\" Tokenize a string. 
Modified in order to handle sentences with recategorization pointers\"\"\"\n bpe_tokens = []\n for tok_span in text.lstrip().split(' '):\n tok_span = tok_span.strip()\n recats = tok_span.rsplit('_', 1)\n if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:\n bpe_tokens.extend([self.INIT + recats[0], '_' + recats[1]])\n else:\n for token in re.findall(self.pat, ' ' + tok_span):\n token = \"\".join(\n self.byte_encoder[b] for b in token.encode(\"utf-8\")\n ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)\n bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(\" \"))\n\n return bpe_tokens\n\n def _tok_bpe(self, token):\n tokk = []\n tok = token.strip()\n recats = tok.rsplit('_', 1)\n if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:\n tokk.extend([self.INIT + recats[0], '_' + recats[1]])\n else:\n for tok in self.patterns.findall(' ' + token):\n tok = \"\".join(\n self.byte_encoder[b] for b in tok.encode(\"utf-8\"))\n toks = self.bpe(tok).split(' ')\n tokk.extend(toks)\n return tokk\n\n def tokenize_amr(self, amr_tokens):\n bpe_tokens = []\n for i, tokk in enumerate(amr_tokens):\n is_in_enc = self.INIT + tokk in self.encoder\n is_rel = tokk.startswith(':') and len(tokk) > 1\n is_spc = tokk.startswith('<') and tokk.endswith('>')\n is_of = tokk.startswith(':') and tokk.endswith('-of')\n is_frame = re.match(r'.+-\\d\\d', tokk) is not None\n\n if tokk.startswith('\"') and tokk.endswith('\"'): # dealing with examples like \"The_United_Kingdom_of_xxx\"\n tokk = tokk[1:-1].replace('_', ' ')\n bpe_toks = [self.INIT + \"<lit>\"]\n bpe_toks += self._tok_bpe(tokk)\n bpe_toks.append(self.INIT + \"</lit>\")\n\n elif (is_rel or is_spc or is_frame or is_of):\n if is_in_enc:\n bpe_toks = [self.INIT + tokk]\n elif is_frame:\n bpe_toks = self._tok_bpe(tokk[:-3]) + [tokk[-3:]]\n elif is_of:\n rel = tokk[:-3]\n if self.INIT + rel in self.encoder:\n bpe_toks = [self.INIT + rel, '-of']\n else:\n bpe_toks = [self.INIT + ':'] + self._tok_bpe(rel[1:]) + ['-of']\n elif is_rel:\n bpe_toks = [self.INIT + ':'] + self._tok_bpe(tokk[1:])\n else:\n print(\"tok:\", tokk)\n print(f\"is_rel:{is_rel}, is_spc:{is_spc}, is_frame:{is_frame}, is_of:{is_of}\")\n exit()\n raise\n else:\n if is_in_enc:\n bpe_toks = [self.INIT + tokk]\n else:\n bpe_toks = self._tok_bpe(tokk)\n\n bpe_tokens.append(bpe_toks)\n bpe_tokens = [b for bb in bpe_tokens for b in bb]\n bpe_token_ids = [self.encoder.get(b, self.unk_token_id) for b in bpe_tokens]\n return bpe_token_ids\n \n def decode_amr(self, tokens, restore_name_ops=None):\n try:\n nodes, backreferences = postprocessing.decode_into_node_and_backreferences(tokens, self)\n except Exception as e:\n print('Decoding failure:', file=sys.stderr)\n print(e, file=sys.stderr)\n return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)\n try:\n graph_ = graph = self._fix_and_make_graph(nodes)\n # if collapse_name_ops:\n # graph_ = graph = postprocessing._split_name_ops(graph)\n except Exception as e:\n print('Building failure:', file=sys.stderr)\n print(nodes, file=sys.stderr)\n print(backreferences, file=sys.stderr)\n print(e, file=sys.stderr)\n return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)\n try:\n graph, status = postprocessing.connect_graph_if_not_connected(graph)\n if status == postprocessing.ParsedStatus.BACKOFF:\n print('Reconnection 1 failure:')\n print(nodes, file=sys.stderr)\n 
print(backreferences, file=sys.stderr)\n print(graph_, file=sys.stderr)\n return graph, status, (nodes, backreferences)\n except Exception as e:\n print('Reconnction 2 failure:', file=sys.stderr)\n print(e, file=sys.stderr)\n print(nodes, file=sys.stderr)\n print(backreferences, file=sys.stderr)\n print(graph_, file=sys.stderr)\n return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (nodes, backreferences)\n \n def _fix_and_make_graph(self, nodes):\n\n nodes_ = []\n for n in nodes:\n if isinstance(n, str):\n if n.startswith('<') and n.endswith('>') and (not n.startswith('<pointer:')):\n pass\n else:\n nodes_.append(n)\n else:\n nodes_.append(n)\n nodes = nodes_\n\n if True:\n i = 0\n nodes_ = []\n while i < len(nodes):\n nxt = nodes[i]\n pst = None\n if isinstance(nxt, str) and nxt.startswith('<pointer:'):\n e = nxt.find('>')\n if e != len(nxt) -1:\n pst = nxt[e+1:]\n nxt = nxt[:e+1]\n nodes_.append(nxt)\n if pst is not None:\n nodes_.append(pst)\n else:\n nodes_.append(nxt)\n i += 1\n nodes = nodes_\n\n i = 1\n nodes_ = [nodes[0]]\n while i < len(nodes):\n nxt = nodes[i]\n if isinstance(nxt, str) and nxt.startswith('<pointer:'):\n nxt = 'z' + nxt[9:-1]\n fol = nodes[i+1]\n # is not expansion\n if isinstance(fol, str) and (fol.startswith(':') or (fol == ')')):\n nodes_.append(nxt)\n else:\n if self.remove_pars:\n nodes_.append('(')\n else:\n if nodes_[-1] != '(':\n nodes_.append('(')\n #pass\n nodes_.append(nxt)\n nodes_.append('/')\n else:\n nodes_.append(nxt)\n i += 1\n nodes = nodes_\n\n i = 0\n nodes_ = []\n while i < (len(nodes) - 1):\n if nodes[i] == ':':\n nodes_.append(nodes[i] + nodes[i+1])\n i += 2\n last = False\n else:\n nodes_.append(nodes[i])\n i += 1\n last = True\n if last:\n nodes_.append(nodes[-1])\n nodes = nodes_\n\n i = 0\n nodes_ = []\n while i < (len(nodes)):\n if i < 2:\n nodes_.append(nodes[i])\n i += 1\n elif nodes_[-2] == '/' and nodes[i] == '/':\n i += 2\n else:\n nodes_.append(nodes[i])\n i += 1\n nodes = nodes_\n\n i = 0\n newvars = 0\n variables = set()\n remap = {}\n nodes_ = []\n while i < (len(nodes)):\n\n next = nodes[i]\n\n if next == '/':\n last = nodes_[-1]\n if last in variables:\n last_remap = f\"z{newvars+1000}\"\n newvars += 1\n nodes_[-1] = last_remap\n remap[last] = last_remap\n variables.add(last)\n nodes_.append(next)\n\n elif self._classify(next) == 'VAR' and next in remap and (i < len(nodes) - 1) and nodes[i+1] != '/':\n next = remap[next]\n nodes_.append(next)\n\n else:\n nodes_.append(next)\n\n i += 1\n\n nodes = nodes_\n pieces_ = []\n open_cnt = 0\n closed_cnt = 0\n if nodes[0] != '(':\n pieces_.append('(')\n open_cnt += 1\n for p in nodes:\n if p == '(':\n open_cnt += 1\n elif p == ')':\n closed_cnt += 1\n pieces_.append(p)\n if open_cnt == closed_cnt:\n break\n nodes = pieces_ + [')'] * (open_cnt - closed_cnt)\n\n pieces = []\n for piece in nodes:\n if not pieces:\n pieces.append('(')\n else:\n piece = str(piece)\n if piece.startswith('\"') or piece.startswith('\"') or '\"' in piece.strip('\"'):\n piece = '\"' + piece.replace('\"', '') + '\"'\n\n prev = self._classify(pieces[-1])\n next = self._classify(piece)\n\n if next == 'CONST':\n quote = False\n for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\\\', '_', '='):\n if char in piece:\n quote = True\n break\n if quote:\n piece = '\"' + piece.strip('\"') + '\"'\n\n if prev == '(':\n if next in ('VAR', 'I'):\n pieces.append(piece)\n elif prev == ')':\n if next in (')', 'EDGE', 'MODE'):\n pieces.append(piece)\n elif prev == 'VAR':\n if next in ('/', 'EDGE', 'MODE', 
')'):\n pieces.append(piece)\n elif prev == '/':\n if next in ('INST', 'I'):\n pieces.append(piece)\n elif prev == 'INST':\n if next in (')', 'EDGE', 'MODE'):\n pieces.append(piece)\n elif prev == 'I':\n if next in ('/', ')', 'EDGE', 'MODE'):\n pieces.append(piece)\n elif prev == 'EDGE':\n if next in ('(', 'VAR', 'CONST', 'I'):\n pieces.append(piece)\n elif next == ')':\n pieces[-1] = piece\n elif next in ('EDGE', 'MODE'):\n pieces[-1] = piece\n elif prev == 'MODE':\n if next == 'INST':\n pieces.append(piece)\n elif prev == 'CONST':\n if next in (')', 'EDGE', 'MODE'):\n pieces.append(piece)\n\n pieces_ = []\n open_cnt = 0\n closed_cnt = 0\n if pieces[0] != '(':\n pieces_.append('(')\n open_cnt += 1\n for p in pieces:\n if p == '(':\n open_cnt += 1\n elif p == ')':\n closed_cnt += 1\n pieces_.append(p)\n if open_cnt == closed_cnt:\n break\n pieces = pieces_ + [')'] * (open_cnt - closed_cnt)\n\n linearized = re.sub(r'\\s+', ' ', ' '.join(pieces)).strip()\n\n \"\"\"\n line = linearized\n # make sure parentheses match\n # copied from https://github.com/RikVN/AMR/blob/master/restoreAMR/restore_amr.py\n open_count = 0\n close_count = 0\n for i, c in enumerate(line):\n if c == '(':\n open_count += 1\n elif c == ')':\n close_count += 1\n if open_count == close_count and open_count > 0:\n line = line[:i].strip()\n break\n old_line = line\n while True:\n open_count = len(re.findall(r'\\(', line))\n close_count = len(re.findall(r'\\)', line))\n if open_count > close_count:\n line += ')' * (open_count - close_count)\n elif close_count > open_count:\n for i in range(close_count - open_count):\n line = line.rstrip(')')\n line = line.rstrip(' ')\n if old_line == line:\n break\n old_line = line\n \"\"\"\n\n graph = penman.decode(linearized + ' ')\n triples = []\n newvars = 2000\n for triple in graph.triples:\n x, rel, y = triple\n if x is None:\n pass\n elif rel == ':instance' and y is None:\n triples.append(penman.Triple(x, rel, 'thing'))\n elif y is None:\n var = f'z{newvars}'\n newvars += 1\n triples.append(penman.Triple(x, rel, var))\n triples.append(penman.Triple(var, ':instance', 'thing'))\n else:\n triples.append(triple)\n graph = penman.Graph(triples)\n linearized = encode(graph)\n\n def fix_text(linearized=linearized):\n n = 0\n def _repl1(match):\n nonlocal n\n out = match.group(1) + match.group(2) + str(3000 + n) + ' / ' + match.group(2) + match.group(3)\n n += 1\n return out\n linearized = re.sub(r'(\\(\\s?)([a-z])([^\\/:\\)]+[:\\)])', _repl1, linearized,\n flags=re.IGNORECASE | re.MULTILINE)\n\n def _repl2(match):\n return match.group(1)\n linearized = re.sub(r'(\\(\\s*[a-z][\\d+]\\s*\\/\\s*[^\\s\\)\\(:\\/]+\\s*)((?:/\\s*[^\\s\\)\\(:\\/]+\\s*)+)', _repl2,\n linearized,\n flags=re.IGNORECASE | re.MULTILINE)\n\n # adds a ':' to args w/o it\n linearized = re.sub(r'([^:])(ARG)', r'\\1 :\\2', linearized)\n\n # removes edges with no node\n # linearized = re.sub(r':[^\\s\\)\\(:\\/]+?\\s*\\)', ')', linearized, flags=re.MULTILINE)\n\n return linearized\n\n linearized = fix_text(linearized)\n g = penman.decode(linearized)\n return g\n \n def _classify(self, node):\n if not isinstance(node, str):\n return \"CONST\"\n elif node == 'i':\n return \"I\"\n elif re.match(r'^[a-z]\\d*$', node) is not None:\n return \"VAR\"\n elif node[0].isdigit():\n return \n elif node.startswith('\"') and node.endswith('\"'):\n return \"CONST\"\n elif node in ('+', '-'):\n return \"CONST\"\n elif node == ':mode':\n return 'MODE'\n elif node.startswith(':'):\n return \"EDGE\"\n elif node in ['/', '(', ')']:\n return node\n 
elif node[0].isalpha():\n for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\\\'):\n if char in node:\n return \"CONST\"\n return \"INST\"\n else:\n return 'CONST'" } ]
import torch import torch.nn as nn import ipdb, logging, re from .prefix_gen_bart import PrefixGenBartForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from transformers import T5ForConditionalGeneration, T5Tokenizer from transformers import BartForConditionalGeneration, AutoConfig, AutoModel from .AMRBART.AMRBartTokenizer import AMRBartTokenizer, AMRRobertaTokenizer from torch.nn import NLLLoss from transformers.modeling_outputs import Seq2SeqLMOutput
13838
# lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias # batch x dec_sequence_length x vocab_size if input_ids is None: input_ids = self._cache_input_ids # batch x sequence_length try: assert input_ids.size(0) == outputs.encoder_last_hidden_state.size(0) # batch size except: ipdb.set_trace() cross_attentions = outputs.cross_attentions # This is in tuple format, and each of them is of shape (batch_size, num_heads, dec_sequence_length, enc_sequence_length). # This are the attentions weights of the decoder’s cross-attention layer, after the attention softmax. # This is for investigating why regularizer works. cross_attentions = torch.stack(cross_attentions[-1:], dim=1) # TODO: we can change the used layer here. cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate layers cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate heads # Now, "cross attentions" is of shape (batch_size, dec_sequence_length, enc_sequence_length) # For cases on using cross_prefix, we need to remove the prefix attention length if self.config.use_cross_prefix: cross_attentions = cross_attentions[:, :, self.config.prefix_length:] copy_words = input_ids.unsqueeze(1).repeat(1, cross_attentions.size(1), 1) #(batch, dec_sequence_length, enc_sequence_length) lm_logits = torch.scatter_add(outputs[0].new_zeros(outputs[0].size(0), outputs[0].size(1), self.config.vocab_size), 2, copy_words, cross_attentions) eps = 1e-7 lm_logits = torch.log(lm_logits+eps) masked_lm_loss = None if labels is not None: loss_fct = NLLLoss(ignore_index=-100) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) #### # For AMR Integration #### class AMRT5(nn.Module): def __init__(self, config): super().__init__() self.config = config self.model = (T5ForConditionalGeneration.from_pretrained(config.AMR_model_path)).encoder.cuda() self.max_graph_len = 512 # self.max_sent_len = 90 self.tokenizer = T5Tokenizer.from_pretrained('t5-base') def get_encoder_output(self, stripped_graphs): # Form encodings and tokenize input_text = ['%s' % graph for graph in stripped_graphs] input_encodings = self.tokenizer.batch_encode_plus(input_text, padding=True, truncation=True, max_length=self.max_graph_len, return_overflowing_tokens=True) # # Check if any graphs were truncated (requires return_overflowing_tokens=True) clip = [l > 0 for l in input_encodings['num_truncated_tokens']] if any(clip): print("overlength") # Convert to tensors input_ids = torch.LongTensor(input_encodings['input_ids']).cuda() attention_mask = torch.LongTensor(input_encodings['attention_mask']).cuda() # Get encoder outputs [batch_size, max_graph_length, 768] encoder_output = self.model(input_ids=input_ids, attention_mask=attention_mask) return encoder_output['last_hidden_state'], attention_mask class AMRBart(nn.Module): def __init__(self, config): super().__init__() self.config = config self.model_config = AutoConfig.from_pretrained(config.AMR_model_path) self.tokenizer = 
AMRBartTokenizer.from_pretrained(config.AMR_model_path) self.model = BartForConditionalGeneration.from_pretrained(config.AMR_model_path).cuda() self.max_graph_len = 512 self.model.resize_token_embeddings(len(self.tokenizer)) self.model = self.model.model.encoder def get_encoder_output(self, stripped_graphs): input_text = ['%s' % graph for graph in stripped_graphs] input_encodings = [ [self.tokenizer.bos_token_id, self.tokenizer.mask_token_id, self.tokenizer.eos_token_id] + [self.tokenizer.amr_bos_token_id] + self.tokenizer.tokenize_amr(itm.split())[:self.max_graph_len -5] + [self.tokenizer.amr_eos_token_id] for itm in input_text] # padding max_batch_length = max(len(x) for x in input_encodings) attention_mask = [[1]*len(x) + [0]*(max_batch_length - len(x)) for x in input_encodings] input_ids = [x + [self.tokenizer.pad_token_id]*(max_batch_length - len(x)) for x in input_encodings] # truncation if max_batch_length > self.max_graph_len: input_ids = [x[:self.max_graph_len] for x in input_ids] attention_mask = [x[:self.max_graph_len] for x in attention_mask] print("overlength") # Convert to tensors input_ids = torch.LongTensor(input_ids).cuda() attention_mask = torch.LongTensor(attention_mask).cuda() # Get encoder outputs [batch_size, max_graph_length, 1024] encoder_output = self.model(input_ids=input_ids, attention_mask=attention_mask) return encoder_output['last_hidden_state'], attention_mask class AMRRoberta(nn.Module): def __init__(self, config): super().__init__() self.config = config self.model_config = AutoConfig.from_pretrained(config.AMR_model_path)
# from transformers import BartForConditionalGeneration class CopyBartWithReg(PrefixGenBartForConditionalGeneration): def __init__(self, config): super().__init__(config) # If extra model/module, we need to initialize the module here. self.linear_copy = nn.Linear(self.config.d_model, 1) def forward( self, input_ids=None, prefix=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: print('decoder_input_shifting') decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, prefix=prefix, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias # batch x dec_sequence_length x vocab_size if input_ids is None: input_ids = self._cache_input_ids # batch x sequence_length try: assert input_ids.size(0) == outputs.encoder_last_hidden_state.size(0) # batch size except: ipdb.set_trace() cross_attentions = outputs.cross_attentions # This is in tuple format, and each of them is of shape (batch_size, num_heads, dec_sequence_length, enc_sequence_length). # This are the attentions weights of the decoder’s cross-attention layer, after the attention softmax. cross_attentions = torch.stack(cross_attentions[-1:], dim=1) # TODO: we can change the used layer here. 
cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate layers cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate heads # Now, "cross attentions" is of shape (batch_size, dec_sequence_length, enc_sequence_length) # For cases on using cross_prefix, we need to remove the prefix attention length if self.config.use_cross_prefix: cross_attentions = cross_attentions[:, :, self.config.prefix_length:] # Probability of copying p_ori = torch.sigmoid(self.linear_copy(outputs[0])) # Merge distribution original_word_pro = torch.softmax(lm_logits, dim=-1) * p_ori #[batch, dec_sequence_length, vocab_size] copy_words = input_ids.unsqueeze(1).repeat(1, cross_attentions.size(1), 1) #(batch, dec_sequence_length, enc_sequence_length) input_len = input_ids.size(1) lm_logits = torch.scatter_add(original_word_pro, 2, copy_words, cross_attentions[:,:,:input_len]*(1-p_ori)) eps = 1e-7 lm_logits = torch.log(lm_logits+eps) masked_lm_loss = None if labels is not None: # loss_fct = CrossEntropyLoss() # # masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) # masked_lm_loss = loss_fct(torch.flatten(cross_attentions, start_dim=0, end_dim=1), labels.view(-1)) loss_fct = NLLLoss(ignore_index=-100) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) # add regularizer to p_ori masked_lm_loss += torch.mean(p_ori) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) class CopyBart(PrefixGenBartForConditionalGeneration): def __init__(self, config): super().__init__(config) # If extra model/module, we need to initialize the module here. 
self.linear_copy = nn.Linear(self.config.d_model, 1) def forward( self, input_ids=None, prefix=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: print('decoder_input_shifting') decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, prefix=prefix, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias # batch x dec_sequence_length x vocab_size if input_ids is None: input_ids = self._cache_input_ids # batch x sequence_length try: assert input_ids.size(0) == outputs.encoder_last_hidden_state.size(0) # batch size except: ipdb.set_trace() cross_attentions = outputs.cross_attentions # This is in tuple format, and each of them is of shape (batch_size, num_heads, dec_sequence_length, enc_sequence_length). # This are the attentions weights of the decoder’s cross-attention layer, after the attention softmax. cross_attentions = torch.stack(cross_attentions[-1:], dim=1) # TODO: we can change the used layer here. 
cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate layers cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate heads # Now, "cross attentions" is of shape (batch_size, dec_sequence_length, enc_sequence_length) # For cases on using cross_prefix, we need to remove the prefix attention length if self.config.use_cross_prefix: cross_attentions = cross_attentions[:, :, self.config.prefix_length:] # Probability of copying p_ori = torch.sigmoid(self.linear_copy(outputs[0])) # Merge distribution original_word_pro = torch.softmax(lm_logits, dim=-1) * p_ori #[batch, dec_sequence_length, vocab_size] copy_words = input_ids.unsqueeze(1).repeat(1, cross_attentions.size(1), 1) #(batch, dec_sequence_length, enc_sequence_length) input_len = input_ids.size(1) lm_logits = torch.scatter_add(original_word_pro, 2, copy_words, cross_attentions[:,:,:input_len]*(1-p_ori)) eps = 1e-7 lm_logits = torch.log(lm_logits+eps) masked_lm_loss = None if labels is not None: # loss_fct = CrossEntropyLoss() # # masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) # masked_lm_loss = loss_fct(torch.flatten(cross_attentions, start_dim=0, end_dim=1), labels.view(-1)) loss_fct = NLLLoss(ignore_index=-100) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) class PureCopyBart(PrefixGenBartForConditionalGeneration): def __init__(self, config): super().__init__(config) def forward( self, input_ids=None, prefix=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: print('decoder_input_shifting') decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, prefix=prefix, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias # batch x dec_sequence_length x vocab_size if input_ids is None: input_ids = self._cache_input_ids # batch x sequence_length try: assert 
input_ids.size(0) == outputs.encoder_last_hidden_state.size(0) # batch size except: ipdb.set_trace() cross_attentions = outputs.cross_attentions # This is in tuple format, and each of them is of shape (batch_size, num_heads, dec_sequence_length, enc_sequence_length). # This are the attentions weights of the decoder’s cross-attention layer, after the attention softmax. # This is for investigating why regularizer works. cross_attentions = torch.stack(cross_attentions[-1:], dim=1) # TODO: we can change the used layer here. cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate layers cross_attentions = torch.mean(cross_attentions, dim=1) # aggregate heads # Now, "cross attentions" is of shape (batch_size, dec_sequence_length, enc_sequence_length) # For cases on using cross_prefix, we need to remove the prefix attention length if self.config.use_cross_prefix: cross_attentions = cross_attentions[:, :, self.config.prefix_length:] copy_words = input_ids.unsqueeze(1).repeat(1, cross_attentions.size(1), 1) #(batch, dec_sequence_length, enc_sequence_length) lm_logits = torch.scatter_add(outputs[0].new_zeros(outputs[0].size(0), outputs[0].size(1), self.config.vocab_size), 2, copy_words, cross_attentions) eps = 1e-7 lm_logits = torch.log(lm_logits+eps) masked_lm_loss = None if labels is not None: loss_fct = NLLLoss(ignore_index=-100) masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) #### # For AMR Integration #### class AMRT5(nn.Module): def __init__(self, config): super().__init__() self.config = config self.model = (T5ForConditionalGeneration.from_pretrained(config.AMR_model_path)).encoder.cuda() self.max_graph_len = 512 # self.max_sent_len = 90 self.tokenizer = T5Tokenizer.from_pretrained('t5-base') def get_encoder_output(self, stripped_graphs): # Form encodings and tokenize input_text = ['%s' % graph for graph in stripped_graphs] input_encodings = self.tokenizer.batch_encode_plus(input_text, padding=True, truncation=True, max_length=self.max_graph_len, return_overflowing_tokens=True) # # Check if any graphs were truncated (requires return_overflowing_tokens=True) clip = [l > 0 for l in input_encodings['num_truncated_tokens']] if any(clip): print("overlength") # Convert to tensors input_ids = torch.LongTensor(input_encodings['input_ids']).cuda() attention_mask = torch.LongTensor(input_encodings['attention_mask']).cuda() # Get encoder outputs [batch_size, max_graph_length, 768] encoder_output = self.model(input_ids=input_ids, attention_mask=attention_mask) return encoder_output['last_hidden_state'], attention_mask class AMRBart(nn.Module): def __init__(self, config): super().__init__() self.config = config self.model_config = AutoConfig.from_pretrained(config.AMR_model_path) self.tokenizer = AMRBartTokenizer.from_pretrained(config.AMR_model_path) self.model = BartForConditionalGeneration.from_pretrained(config.AMR_model_path).cuda() self.max_graph_len = 512 self.model.resize_token_embeddings(len(self.tokenizer)) 
self.model = self.model.model.encoder def get_encoder_output(self, stripped_graphs): input_text = ['%s' % graph for graph in stripped_graphs] input_encodings = [ [self.tokenizer.bos_token_id, self.tokenizer.mask_token_id, self.tokenizer.eos_token_id] + [self.tokenizer.amr_bos_token_id] + self.tokenizer.tokenize_amr(itm.split())[:self.max_graph_len -5] + [self.tokenizer.amr_eos_token_id] for itm in input_text] # padding max_batch_length = max(len(x) for x in input_encodings) attention_mask = [[1]*len(x) + [0]*(max_batch_length - len(x)) for x in input_encodings] input_ids = [x + [self.tokenizer.pad_token_id]*(max_batch_length - len(x)) for x in input_encodings] # truncation if max_batch_length > self.max_graph_len: input_ids = [x[:self.max_graph_len] for x in input_ids] attention_mask = [x[:self.max_graph_len] for x in attention_mask] print("overlength") # Convert to tensors input_ids = torch.LongTensor(input_ids).cuda() attention_mask = torch.LongTensor(attention_mask).cuda() # Get encoder outputs [batch_size, max_graph_length, 1024] encoder_output = self.model(input_ids=input_ids, attention_mask=attention_mask) return encoder_output['last_hidden_state'], attention_mask class AMRRoberta(nn.Module): def __init__(self, config): super().__init__() self.config = config self.model_config = AutoConfig.from_pretrained(config.AMR_model_path)
self.tokenizer = AMRRobertaTokenizer.from_pretrained(config.AMR_model_path)
2
2023-11-15 21:32:56+00:00
16k
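The cropped_code and all_code fields of this record revolve around a pointer/copy step: decoder vocabulary probabilities are gated by a copy probability and mixed with cross-attention weights scattered onto the encoder's input token ids. A minimal self-contained sketch of that merge, using assumed tensor shapes and random inputs rather than the repository's modules, looks like this:

import torch

batch, dec_len, enc_len, vocab = 2, 5, 7, 50
input_ids = torch.randint(0, vocab, (batch, enc_len))                     # encoder-side token ids
lm_logits = torch.randn(batch, dec_len, vocab)                            # decoder vocabulary logits
cross_attn = torch.softmax(torch.randn(batch, dec_len, enc_len), dim=-1)  # attention over source tokens
p_gen = torch.sigmoid(torch.randn(batch, dec_len, 1))                     # probability of generating (vs. copying)

gen_probs = torch.softmax(lm_logits, dim=-1) * p_gen                      # generation share of the mixture
copy_index = input_ids.unsqueeze(1).repeat(1, dec_len, 1)                 # (batch, dec_len, enc_len)
mixed = torch.scatter_add(gen_probs, 2, copy_index, cross_attn * (1 - p_gen))
log_probs = torch.log(mixed + 1e-7)                                       # suitable for NLLLoss, as in the record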
ahayler/s4c
scripts/images/gen_img_custom.py
[ { "identifier": "BTSNet", "path": "models/bts/model/models_bts.py", "snippet": "class BTSNet(torch.nn.Module):\n def __init__(self, conf):\n super().__init__()\n\n self.d_min = conf.get(\"z_near\")\n self.d_max = conf.get(\"z_far\")\n\n self.learn_empty = conf.get(\"learn_empty\", True)\n self.empty_empty = conf.get(\"empty_empty\", False)\n self.inv_z = conf.get(\"inv_z\", True)\n\n self.color_interpolation = conf.get(\"color_interpolation\", \"bilinear\")\n self.code_mode = conf.get(\"code_mode\", \"z\")\n if self.code_mode not in [\"z\", \"distance\"]:\n raise NotImplementedError(f\"Unknown mode for positional encoding: {self.code_mode}\")\n\n self.encoder = make_backbone(conf[\"encoder\"])\n self.code_xyz = PositionalEncoding.from_conf(conf[\"code\"], d_in=3)\n\n self.flip_augmentation = conf.get(\"flip_augmentation\", False)\n\n self.return_sample_depth = conf.get(\"return_sample_depth\", False)\n\n self.sample_color = conf.get(\"sample_color\", True)\n\n d_in = self.encoder.latent_size + self.code_xyz.d_out\n d_out = 1 if self.sample_color else 4\n\n self._d_in = d_in\n self._d_out = d_out\n\n self.mlp_coarse = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=d_out)\n self.mlp_fine = make_mlp(conf[\"mlp_fine\"], d_in, d_out=d_out, allow_empty=True)\n\n # MLP for segmentation classes\n # TODO: Find the output dimensions automatically\n self.segmentation_mode = conf.get('segmentation_mode', None)\n if self.segmentation_mode == 'KITTI-360':\n self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=21)\n # self.mlp_segmentation = make_segnet(d_in=d_in, d_out=21, d_hidden_list=[64])\n elif self.segmentation_mode == 'panoptic_deeplab':\n # self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=19)\n self.mlp_segmentation = make_segnet(d_in=d_in, d_out=19, d_hidden_list=[64])\n # self.mlp_segmentation = make_intercept_model(d_in, d_out=21)\n\n if self.learn_empty:\n self.empty_feature = nn.Parameter(torch.randn((self.encoder.latent_size,), requires_grad=True))\n\n self._scale = 0\n\n def set_scale(self, scale):\n self._scale = scale\n\n def get_scale(self):\n return self._scale\n\n def compute_grid_transforms(self, *args, **kwargs):\n pass\n\n def encode(self, images, Ks, poses_c2w, ids_encoder=None, ids_render=None, images_alt=None, combine_ids=None):\n poses_w2c = torch.inverse(poses_c2w)\n\n if ids_encoder is None:\n images_encoder = images\n Ks_encoder = Ks\n poses_w2c_encoder = poses_w2c\n ids_encoder = list(range(len(images)))\n else:\n images_encoder = images[:, ids_encoder]\n Ks_encoder = Ks[:, ids_encoder]\n poses_w2c_encoder = poses_w2c[:, ids_encoder]\n\n if images_alt is not None:\n images = images_alt\n else:\n images = images * .5 + .5\n\n if ids_render is None:\n images_render = images\n Ks_render = Ks\n poses_w2c_render = poses_w2c\n ids_render = list(range(len(images)))\n else:\n images_render = images[:, ids_render]\n Ks_render = Ks[:, ids_render]\n poses_w2c_render = poses_w2c[:, ids_render]\n\n if combine_ids is not None:\n combine_ids = list(list(group) for group in combine_ids)\n get_combined = set(sum(combine_ids, []))\n for i in range(images.shape[1]):\n if i not in get_combined:\n combine_ids.append((i,))\n remap_encoder = {v: i for i, v in enumerate(ids_encoder)}\n remap_render = {v: i for i, v in enumerate(ids_render)}\n comb_encoder = [[remap_encoder[i] for i in group if i in ids_encoder] for group in combine_ids]\n comb_render = [[remap_render[i] for i in group if i in ids_render] for group in combine_ids]\n comb_encoder = [group for group in 
comb_encoder if len(group) > 0]\n comb_render = [group for group in comb_render if len(group) > 0]\n else:\n comb_encoder = None\n comb_render = None\n\n n, nv, c, h, w = images_encoder.shape\n c_l = self.encoder.latent_size\n\n if self.flip_augmentation and self.training:\n do_flip = (torch.rand(1) > .5).item()\n else:\n do_flip = False\n\n if do_flip:\n images_encoder = torch.flip(images_encoder, dims=(-1, ))\n\n image_latents_ms = self.encoder(images_encoder.view(n * nv, c, h, w))\n\n if do_flip:\n image_latents_ms = [torch.flip(il, dims=(-1, )) for il in image_latents_ms]\n\n _, _, h_, w_ = image_latents_ms[0].shape\n image_latents_ms = [F.interpolate(image_latents, (h_, w_)).view(n, nv, c_l, h_, w_) for image_latents in image_latents_ms]\n\n if torch.any(torch.isnan(torch.stack(image_latents_ms))):\n self.encoder(images_encoder.view(n * nv, c, h, w))\n # raise Exception(\"NaN in encoded features.\")\n\n self.grid_f_features = image_latents_ms\n self.grid_f_Ks = Ks_encoder\n self.grid_f_poses_w2c = poses_w2c_encoder\n self.grid_f_combine = comb_encoder\n\n self.grid_c_imgs = images_render\n self.grid_c_Ks = Ks_render\n self.grid_c_poses_w2c = poses_w2c_render\n self.grid_c_combine = comb_render\n\n def sample_features(self, xyz, use_single_featuremap=True):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_f_features[self._scale].shape\n\n # if use_single_featuremap:\n # nv = 1\n\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_f_poses_w2c[:, :nv, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_f_Ks[:, :nv] @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, :, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n if self.code_mode == \"z\":\n # Get z into [-1, 1] range\n if self.inv_z:\n z = (1 / z.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n z = (z - self.d_min) / (self.d_max - self.d_min)\n z = 2 * z - 1\n xyz_projected = torch.cat((xy, z), dim=-1)\n elif self.code_mode == \"distance\":\n if self.inv_z:\n distance = (1 / distance.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n distance = (distance - self.d_min) / (self.d_max - self.d_min)\n distance = 2 * distance - 1\n xyz_projected = torch.cat((xy, distance), dim=-1)\n xyz_code = self.code_xyz(xyz_projected.view(n * nv * n_pts, -1)).view(n, nv, n_pts, -1)\n\n feature_map = self.grid_f_features[self._scale][:, :nv]\n # These samples are from different scales\n if self.learn_empty:\n empty_feature_expanded = self.empty_feature.view(1, 1, 1, c).expand(n, nv, n_pts, c)\n\n sampled_features = F.grid_sample(feature_map.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=\"bilinear\", padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n\n if self.learn_empty:\n sampled_features[invalid.expand(-1, -1, -1, c)] = empty_feature_expanded[invalid.expand(-1, -1, -1, c)]\n\n sampled_features = torch.cat((sampled_features, xyz_code), dim=-1)\n\n # If there are multiple frames with predictions, reduce them.\n # TODO: Technically, this implementations should be improved if we use multiple frames.\n # The reduction should only happen after we perform the unprojection.\n\n if self.grid_f_combine is not None:\n 
invalid_groups = []\n sampled_features_groups = []\n\n for group in self.grid_f_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_features_groups.append(sampled_features[:, group])\n\n invalid_to_combine = invalid[:, group]\n features_to_combine = sampled_features[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n features_picked = torch.gather(features_to_combine, dim=1, index=indices.expand(-1, -1, -1, features_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_features_groups.append(features_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_features = torch.cat(sampled_features_groups, dim=1)\n\n if use_single_featuremap:\n sampled_features = sampled_features.mean(dim=1)\n invalid = torch.any(invalid, dim=1)\n\n return sampled_features, invalid\n\n def sample_colors(self, xyz):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_c_imgs.shape\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_c_poses_w2c[:, :, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_c_Ks @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n # This scales the x-axis into the right range.\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, :, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n sampled_colors = F.grid_sample(self.grid_c_imgs.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=self.color_interpolation, padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n assert not torch.any(torch.isnan(sampled_colors))\n\n if self.grid_c_combine is not None:\n invalid_groups = []\n sampled_colors_groups = []\n\n for group in self.grid_c_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_colors_groups.append(sampled_colors[:, group])\n continue\n\n invalid_to_combine = invalid[:, group]\n colors_to_combine = sampled_colors[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n colors_picked = torch.gather(colors_to_combine, dim=1, index=indices.expand(-1, -1, -1, colors_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_colors_groups.append(colors_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_colors = torch.cat(sampled_colors_groups, dim=1)\n\n if self.return_sample_depth:\n distance = distance.view(n, nv, n_pts, 1)\n sampled_colors = torch.cat((sampled_colors, distance), dim=-1)\n\n return sampled_colors, invalid\n\n def forward(self, xyz, coarse=True, viewdirs=None, far=False, only_density=False, predict_segmentation=False):\n \"\"\"\n Predict (r, g, b, sigma) at world space points xyz.\n Please call encode first!\n :param xyz (B, 3)\n B is batch of points (in rays)\n :param predict_segmentation, if true also return the segmentation distribution for all the points\n :return (B, 4) r g b sigma\n \"\"\"\n\n with profiler.record_function(\"model_inference\"):\n n, n_pts, _ = xyz.shape\n nv = self.grid_c_imgs.shape[1]\n\n if self.grid_c_combine is not None:\n nv = len(self.grid_c_combine)\n\n # Sampled features all has shape: scales [n, n_pts, c + 
xyz_code]\n sampled_features, invalid_features = self.sample_features(xyz, use_single_featuremap=not only_density) # invalid features (n, n_pts, 1)\n sampled_features = sampled_features.reshape(n * n_pts, -1)\n\n mlp_input = sampled_features.view(n, n_pts, -1)\n\n # Camera frustum culling stuff, currently disabled\n combine_index = None\n dim_size = None\n\n # Run main NeRF network\n if coarse or self.mlp_fine is None:\n mlp_output = self.mlp_coarse(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n else:\n mlp_output = self.mlp_fine(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n\n segs = None\n if predict_segmentation:\n segs = self.mlp_segmentation(mlp_input)\n # print(next(self.mlp_segmentation.parameters()))\n # softmax to get a class distribution\n segs = F.softmax(segs, dim=2)\n # (n, pts, c) -> (n, n_pts, c)\n mlp_output = mlp_output.reshape(n, n_pts, self._d_out)\n\n if self.sample_color:\n sigma = mlp_output[..., :1]\n sigma = F.softplus(sigma)\n rgb, invalid_colors = self.sample_colors(xyz) # (n, nv, pts, 3)\n else:\n sigma = mlp_output[..., :1]\n sigma = F.relu(sigma)\n rgb = mlp_output[..., 1:4].reshape(n, 1, n_pts, 3)\n rgb = F.sigmoid(rgb)\n invalid_colors = invalid_features.unsqueeze(-2)\n nv = 1\n\n if self.empty_empty:\n sigma[invalid_features[..., 0]] = 0\n # TODO: Think about this!\n # Since we don't train the colors directly, lets use softplus instead of relu\n\n if not only_density:\n _, _, _, c = rgb.shape\n rgb = rgb.permute(0, 2, 1, 3).reshape(n, n_pts, nv * c) # (n, pts, nv * 3)\n invalid_colors = invalid_colors.permute(0, 2, 1, 3).reshape(n, n_pts, nv)\n\n invalid = invalid_colors | invalid_features # Invalid features gets broadcasted to (n, n_pts, nv)\n invalid = invalid.to(rgb.dtype)\n else:\n rgb = torch.zeros((n, n_pts, nv * 3), device=sigma.device)\n invalid = invalid_features.to(sigma.dtype)\n\n if predict_segmentation:\n return rgb, invalid, sigma, segs\n else:\n return rgb, invalid, sigma" }, { "identifier": "ImageRaySampler", "path": "models/bts/model/ray_sampler.py", "snippet": "class ImageRaySampler(RaySampler):\n def __init__(self, z_near, z_far, height=None, width=None, channels=3, norm_dir=True):\n self.z_near = z_near\n self.z_far = z_far\n self.height = height\n self.width = width\n self.channels = channels\n self.norm_dir = norm_dir\n\n def sample(self, images, poses, projs, segs=None, sample_segs=False):\n n, v, _, _ = poses.shape\n\n if self.height is None:\n self.height, self.width = images.shape[-2:]\n\n all_rgb_gt = []\n all_rays = []\n all_segs_gt = []\n\n for n_ in range(n):\n focals = projs[n_, :, [0, 1], [0, 1]]\n centers = projs[n_, :, [0, 1], [2, 2]]\n\n rays = util.gen_rays(poses[n_].view(-1, 4, 4), self.width, self.height, focal=focals, c=centers, z_near=self.z_near, z_far=self.z_far, norm_dir=self.norm_dir).view(-1, 8)\n all_rays.append(rays)\n\n if images is not None:\n rgb_gt = images[n_].view(-1, self.channels, self.height, self.width)\n rgb_gt = (rgb_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, self.channels))\n all_rgb_gt.append(rgb_gt)\n\n if sample_segs:\n segs_gt = segs[n_].view(-1, 1, self.height, self.width)\n segs_gt = (segs_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, 1))\n all_segs_gt.append(segs_gt)\n\n all_rays = torch.stack(all_rays)\n if images is not None:\n all_rgb_gt = torch.stack(all_rgb_gt)\n else:\n all_rgb_gt = None\n\n if sample_segs:\n all_segs_gt = torch.stack(all_segs_gt)\n # the None accounts 
for the patch_to_image\n return all_rays, all_rgb_gt, all_segs_gt, None\n else:\n return all_rays, all_rgb_gt\n\n def reconstruct(self, render_dict, channels=None, reconstruct_segmentation=False):\n coarse = render_dict[\"coarse\"]\n fine = render_dict[\"fine\"]\n\n if channels is None:\n channels = self.channels\n\n if reconstruct_segmentation:\n c_segmentation = coarse[\"segs\"]\n # c_segmentation_raw = coarse[\"segs_raw\"]\n n_classes = c_segmentation.shape[-1]\n # n_samples = c_segmentation_raw.shape[-2]\n\n c_rgb = coarse[\"rgb\"] # n, n_pts, v * 3\n c_weights = coarse[\"weights\"]\n c_depth = coarse[\"depth\"]\n c_invalid = coarse[\"invalid\"]\n\n f_rgb = fine[\"rgb\"] # n, n_pts, v * 3\n f_weights = fine[\"weights\"]\n f_depth = fine[\"depth\"]\n f_invalid = fine[\"invalid\"]\n\n n, n_pts, v_c = c_rgb.shape\n v_in = n_pts // (self.height * self.width)\n v_render = v_c // channels\n c_n_smps = c_weights.shape[-1]\n f_n_smps = f_weights.shape[-1]\n # (This can be a different v from the sample method)\n\n if reconstruct_segmentation:\n coarse[\"segs\"] = c_segmentation.view(n, v_in, self.height, self.width, n_classes)\n # coarse[\"segs_raw\"] = c_segmentation_raw.view(n, v_in, self.height, self.width, n_samples, n_classes)\n\n coarse[\"rgb\"] = c_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n coarse[\"weights\"] = c_weights.view(n, v_in, self.height, self.width, c_n_smps)\n coarse[\"depth\"] = c_depth.view(n, v_in, self.height, self.width)\n coarse[\"invalid\"] = c_invalid.view(n, v_in, self.height, self.width, c_n_smps, v_render)\n\n fine[\"rgb\"] = f_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n fine[\"weights\"] = f_weights.view(n, v_in, self.height, self.width, f_n_smps)\n fine[\"depth\"] = f_depth.view(n, v_in, self.height, self.width)\n fine[\"invalid\"] = f_invalid.view(n, v_in, self.height, self.width, f_n_smps, v_render)\n\n if \"alphas\" in coarse:\n c_alphas = coarse[\"alphas\"]\n f_alphas = fine[\"alphas\"]\n coarse[\"alphas\"] = c_alphas.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"alphas\"] = f_alphas.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"z_samps\" in coarse:\n c_z_samps = coarse[\"z_samps\"]\n f_z_samps = fine[\"z_samps\"]\n coarse[\"z_samps\"] = c_z_samps.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"z_samps\"] = f_z_samps.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"rgb_samps\" in coarse:\n c_rgb_samps = coarse[\"rgb_samps\"]\n f_rgb_samps = fine[\"rgb_samps\"]\n coarse[\"rgb_samps\"] = c_rgb_samps.view(n, v_in, self.height, self.width, c_n_smps, v_render, channels)\n fine[\"rgb_samps\"] = f_rgb_samps.view(n, v_in, self.height, self.width, f_n_smps, v_render, channels)\n\n render_dict[\"coarse\"] = coarse\n render_dict[\"fine\"] = fine\n\n if \"rgb_gt\" in render_dict:\n rgb_gt = render_dict[\"rgb_gt\"]\n render_dict[\"rgb_gt\"] = rgb_gt.view(n, v_in, self.height, self.width, channels)\n\n return render_dict" }, { "identifier": "NeRFRenderer", "path": "models/common/render/nerf.py", "snippet": "class NeRFRenderer(torch.nn.Module):\n \"\"\"\n NeRF differentiable renderer\n :param n_coarse number of coarse (binned uniform) samples\n :param n_fine number of fine (importance) samples\n :param n_fine_depth number of expected depth samples\n :param noise_std noise to add to sigma. 
We do not use it\n :param depth_std noise for depth samples\n :param eval_batch_size ray batch size for evaluation\n :param white_bkgd if true, background color is white; else black\n :param lindisp if to use samples linear in disparity instead of distance\n :param sched ray sampling schedule. list containing 3 lists of equal length.\n sched[0] is list of iteration numbers,\n sched[1] is list of coarse sample numbers,\n sched[2] is list of fine sample numbers\n \"\"\"\n\n def __init__(\n self,\n n_coarse=128,\n n_fine=0,\n n_fine_depth=0,\n noise_std=0.0,\n depth_std=0.01,\n eval_batch_size=100000,\n white_bkgd=False,\n lindisp=False,\n sched=None, # ray sampling schedule for coarse and fine rays\n hard_alpha_cap=False\n ):\n super().__init__()\n self.n_coarse = n_coarse\n self.n_fine = n_fine\n self.n_fine_depth = n_fine_depth\n\n self.noise_std = noise_std\n self.depth_std = depth_std\n\n self.eval_batch_size = eval_batch_size\n self.white_bkgd = white_bkgd\n self.lindisp = lindisp\n if lindisp:\n print(\"Using linear displacement rays\")\n self.using_fine = n_fine > 0\n self.sched = sched\n if sched is not None and len(sched) == 0:\n self.sched = None\n self.register_buffer(\n \"iter_idx\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.register_buffer(\n \"last_sched\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.hard_alpha_cap = hard_alpha_cap\n\n def sample_coarse(self, rays):\n \"\"\"\n Stratified sampling. Note this is different from original NeRF slightly.\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :return (B, Kc)\n \"\"\"\n device = rays.device\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n\n step = 1.0 / self.n_coarse\n B = rays.shape[0]\n z_steps = torch.linspace(0, 1 - step, self.n_coarse, device=device) # (Kc)\n z_steps = z_steps.unsqueeze(0).repeat(B, 1) # (B, Kc)\n z_steps += torch.rand_like(z_steps) * step\n if not self.lindisp: # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n return 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kc)\n\n def sample_coarse_from_dist(self, rays, weights, z_samp):\n device = rays.device\n B = rays.shape[0]\n\n num_bins = weights.shape[-1]\n num_samples = self.n_coarse\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(B, num_samples, dtype=torch.float32, device=device) # (B, Kf)\n interval_ids = torch.searchsorted(cdf, u, right=True) - 1 # (B, Kf)\n interval_ids = torch.clamp(interval_ids, 0, num_samples-1)\n interval_interp = torch.rand_like(interval_ids, dtype=torch.float32)\n\n # z_samps describe the centers of the respective histogram bins. 
Therefore, we have to extend them to the left and right\n if self.lindisp:\n z_samp = 1 / z_samp\n\n centers = .5 * (z_samp[:, 1:] + z_samp[:, :-1])\n interval_borders = torch.cat((z_samp[:, :1], centers, z_samp[:, -1:]), dim=-1)\n\n left_border = torch.gather(interval_borders, dim=-1, index=interval_ids)\n right_border = torch.gather(interval_borders, dim=-1, index=interval_ids+1)\n\n z_samp_new = left_border * (1 - interval_interp) + right_border * interval_interp\n\n if self.lindisp:\n z_samp_new = 1 / z_samp_new\n\n assert not torch.any(torch.isnan(z_samp_new))\n\n return z_samp_new\n\n def sample_fine(self, rays, weights):\n \"\"\"min\n Weighted stratified (importance) sample\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param weights (B, Kc)\n :return (B, Kf-Kfd)\n \"\"\"\n device = rays.device\n B = rays.shape[0]\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(\n B, self.n_fine - self.n_fine_depth, dtype=torch.float32, device=device\n ) # (B, Kf)\n inds = torch.searchsorted(cdf, u, right=True).float() - 1.0 # (B, Kf)\n inds = torch.clamp_min(inds, 0.0)\n\n z_steps = (inds + torch.rand_like(inds)) / self.n_coarse # (B, Kf)\n\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n if not self.lindisp: # Use linear sampling in depth space\n z_samp = near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n z_samp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def sample_fine_depth(self, rays, depth):\n \"\"\"\n Sample around specified depth\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param depth (B)\n :return (B, Kfd)\n \"\"\"\n z_samp = depth.unsqueeze(1).repeat((1, self.n_fine_depth))\n z_samp += torch.randn_like(z_samp) * self.depth_std\n # Clamp does not support tensor bounds\n z_samp = torch.max(torch.min(z_samp, rays[:, -1:]), rays[:, -2:-1])\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def composite(self, model, rays, z_samp, coarse=True, sb=0, predict_segmentation=False):\n \"\"\"\n Render RGB and depth for each ray using NeRF alpha-compositing formula,\n given sampled positions along each ray (see sample_*)\n :param model should return (B, (r, g, b, sigma)) when called with (B, (x, y, z))\n should also support 'coarse' boolean argument\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param z_samp z positions sampled for each ray (B, K)\n :param coarse whether to evaluate using coarse NeRF\n :param predict_segmentation if true also predict the semantic distribution\n :param sb super-batch dimension; 0 = disable\n :return weights (B, K), rgb (B, 3), depth (B)\n \"\"\"\n with profiler.record_function(\"renderer_composite\"):\n B, K = z_samp.shape\n\n deltas = z_samp[:, 1:] - z_samp[:, :-1] # (B, K-1)\n delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # infty (B, 1)\n # delta_inf = rays[:, -1:] - z_samp[:, -1:]\n deltas = torch.cat([deltas, delta_inf], -1) # (B, K)\n\n # (B, K, 3)\n points = rays[:, None, :3] + z_samp.unsqueeze(2) * rays[:, None, 3:6]\n points = points.reshape(-1, 3) # (B*K, 3)\n\n use_viewdirs = hasattr(model, \"use_viewdirs\") and model.use_viewdirs\n\n rgbs_all, invalid_all, sigmas_all, segs_all = [], [], [], []\n if sb 
> 0:\n points = points.reshape(\n sb, -1, 3\n ) # (SB, B'*K, 3) B' is real ray batch size\n eval_batch_size = (self.eval_batch_size - 1) // sb + 1\n eval_batch_dim = 1\n else:\n eval_batch_size = self.eval_batch_size\n eval_batch_dim = 0\n\n split_points = torch.split(points, eval_batch_size, dim=eval_batch_dim)\n if use_viewdirs:\n dim1 = K\n viewdirs = rays[:, None, 3:6].expand(-1, dim1, -1) # (B, K, 3)\n if sb > 0:\n viewdirs = viewdirs.reshape(sb, -1, 3) # (SB, B'*K, 3)\n else:\n viewdirs = viewdirs.reshape(-1, 3) # (B*K, 3)\n split_viewdirs = torch.split(\n viewdirs, eval_batch_size, dim=eval_batch_dim\n )\n for pnts, dirs in zip(split_points, split_viewdirs):\n rgbs, invalid, sigmas = model(pnts, coarse=coarse, viewdirs=dirs)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n else:\n for pnts in split_points:\n if predict_segmentation:\n rgbs, invalid, sigmas, segs = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n segs_all.append(segs)\n else:\n rgbs, invalid, sigmas = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n points = None\n viewdirs = None\n # (B*K, 4) OR (SB, B'*K, 4)\n rgbs = torch.cat(rgbs_all, dim=eval_batch_dim)\n invalid = torch.cat(invalid_all, dim=eval_batch_dim)\n sigmas = torch.cat(sigmas_all, dim=eval_batch_dim)\n\n if predict_segmentation:\n segs = torch.cat(segs_all, dim=eval_batch_dim)\n segs = segs.reshape(B, K, -1) # (B, K, n_classes)\n\n rgbs = rgbs.reshape(B, K, -1) # (B, K, 4 or 5)\n invalid = invalid.reshape(B, K, -1)\n sigmas = sigmas.reshape(B, K)\n\n if self.training and self.noise_std > 0.0:\n sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std\n\n alphas = 1 - torch.exp(-deltas.abs() * torch.relu(sigmas)) # (B, K) (delta should be positive anyways)\n\n if self.hard_alpha_cap:\n alphas[:, -1] = 1\n\n deltas = None\n sigmas = None\n alphas_shifted = torch.cat(\n [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1\n ) # (B, K+1) = [1, a1, a2, ...]\n T = torch.cumprod(alphas_shifted, -1) # (B)\n weights = alphas * T[:, :-1] # (B, K)\n # alphas = None\n alphas_shifted = None\n\n rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2) # (B, 3)\n depth_final = torch.sum(weights * z_samp, -1) # (B)\n\n\n\n if self.white_bkgd:\n # White background\n pix_alpha = weights.sum(dim=1) # (B), pixel alpha\n rgb_final = rgb_final + 1 - pix_alpha.unsqueeze(-1) # (B, 3)\n\n if predict_segmentation:\n segs_final = torch.sum(weights.unsqueeze(-1) * segs, dim=-2) # (B, n_classes)\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs,\n # segs,\n segs_final\n )\n else:\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs\n )\n\n def forward(\n self, model, rays, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, predict_segmentation=False, sample_from_dist=None):\n \"\"\"\n :model nerf model, should return (SB, B, (r, g, b, sigma))\n when called with (SB, B, (x, y, z)), for multi-object:\n SB = 'super-batch' = size of object batch,\n B = size of per-object ray batch.\n Should also support 'coarse' boolean argument for coarse NeRF.\n :param rays ray spec [origins (3), directions (3), near (1), far (1)] (SB, B, 8)\n :param want_weights if true, returns compositing weights (SB, B, K)\n :param predict_segmentation if true, return the segmentation class distribution for each pixel\n :return render dict\n 
\"\"\"\n with profiler.record_function(\"renderer_forward\"):\n if self.sched is not None and self.last_sched.item() > 0:\n self.n_coarse = self.sched[1][self.last_sched.item() - 1]\n self.n_fine = self.sched[2][self.last_sched.item() - 1]\n\n assert len(rays.shape) == 3\n superbatch_size = rays.shape[0]\n rays = rays.reshape(-1, 8) # (SB * B, 8)\n\n if sample_from_dist is None:\n z_coarse = self.sample_coarse(rays) # (B, Kc)\n else:\n prop_weights, prop_z_samp = sample_from_dist\n n_samples = prop_weights.shape[-1]\n prop_weights = prop_weights.reshape(-1, n_samples)\n prop_z_samp = prop_z_samp.reshape(-1, n_samples)\n z_coarse = self.sample_coarse_from_dist(rays, prop_weights, prop_z_samp)\n z_coarse, _ = torch.sort(z_coarse, dim=-1)\n\n coarse_composite = self.composite(\n model, rays, z_coarse, coarse=True, sb=superbatch_size, predict_segmentation=predict_segmentation\n )\n\n outputs = DotMap(\n coarse=self._format_outputs(\n coarse_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas,\n want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps, want_segmentation=predict_segmentation\n ),\n )\n\n if self.using_fine:\n all_samps = [z_coarse]\n if self.n_fine - self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine(rays, coarse_composite[0].detach())\n ) # (B, Kf - Kfd)\n if self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine_depth(rays, coarse_composite[2])\n ) # (B, Kfd)\n z_combine = torch.cat(all_samps, dim=-1) # (B, Kc + Kf)\n z_combine_sorted, argsort = torch.sort(z_combine, dim=-1)\n fine_composite = self.composite(\n model, rays, z_combine_sorted, coarse=False, sb=superbatch_size,\n )\n outputs.fine = self._format_outputs(\n fine_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas, want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps\n )\n\n return outputs\n\n def _format_outputs(\n self, rendered_outputs, superbatch_size, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, want_segmentation=False\n ):\n if want_segmentation:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps, segs_final = rendered_outputs\n else:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps = rendered_outputs\n\n n_smps = weights.shape[-1]\n out_d_rgb = rgb_final.shape[-1]\n out_d_i = invalid.shape[-1]\n\n if superbatch_size > 0:\n rgb_final = rgb_final.reshape(superbatch_size, -1, out_d_rgb)\n depth = depth.reshape(superbatch_size, -1)\n weights = weights.reshape(superbatch_size, -1, n_smps)\n alphas = alphas.reshape(superbatch_size, -1, n_smps)\n invalid = invalid.reshape(superbatch_size, -1, n_smps, out_d_i)\n z_samps = z_samps.reshape(superbatch_size, -1, n_smps)\n rgb_samps = rgb_samps.reshape(superbatch_size, -1, n_smps, out_d_rgb)\n\n if want_segmentation:\n out_segs = segs_final.shape[-1]\n segs_final = segs_final.reshape(superbatch_size, -1, out_segs)\n\n ret_dict = DotMap(rgb=rgb_final, depth=depth, invalid=invalid)\n if want_weights:\n ret_dict.weights = weights\n if want_alphas:\n ret_dict.alphas = alphas\n if want_z_samps:\n ret_dict.z_samps = z_samps\n if want_rgb_samps:\n ret_dict.rgb_samps = rgb_samps\n if want_segmentation:\n ret_dict.segs = segs_final\n # ret_dict.segs_raw = segs_raw\n return ret_dict\n\n def sched_step(self, steps=1):\n \"\"\"\n Called each training iteration to update sample numbers\n according to schedule\n \"\"\"\n if self.sched is None:\n return\n self.iter_idx += steps\n while (\n self.last_sched.item() < len(self.sched[0])\n and 
self.iter_idx.item() >= self.sched[0][self.last_sched.item()]\n ):\n self.n_coarse = self.sched[1][self.last_sched.item()]\n self.n_fine = self.sched[2][self.last_sched.item()]\n print(\n \"INFO: NeRF sampling resolution changed on schedule ==> c\",\n self.n_coarse,\n \"f\",\n self.n_fine,\n )\n self.last_sched += 1\n\n @classmethod\n def from_conf(cls, conf, white_bkgd=False, eval_batch_size=100000):\n return cls(\n conf.get(\"n_coarse\", 128),\n conf.get(\"n_fine\", 0),\n n_fine_depth=conf.get(\"n_fine_depth\", 0),\n noise_std=conf.get(\"noise_std\", 0.0),\n depth_std=conf.get(\"depth_std\", 0.01),\n white_bkgd=conf.get(\"white_bkgd\", white_bkgd),\n lindisp=conf.get(\"lindisp\", True),\n eval_batch_size=conf.get(\"eval_batch_size\", eval_batch_size),\n sched=conf.get(\"sched\", None),\n hard_alpha_cap=conf.get(\"hard_alpha_cap\", False)\n )\n\n def bind_parallel(self, net, gpus=None, simple_output=False):\n \"\"\"\n Returns a wrapper module compatible with DataParallel.\n Specifically, it renders rays with this renderer\n but always using the given network instance.\n Specify a list of GPU ids in 'gpus' to apply DataParallel automatically.\n :param net A PixelNeRF network\n :param gpus list of GPU ids to parallize to. If length is 1,\n does not parallelize\n :param simple_output only returns rendered (rgb, depth) instead of the \n full render output map. Saves data tranfer cost.\n :return torch module\n \"\"\"\n wrapped = _RenderWrapper(net, self, simple_output=simple_output)\n if gpus is not None and len(gpus) > 1:\n print(\"Using multi-GPU\", gpus)\n wrapped = torch.nn.DataParallel(wrapped, gpus, dim=1)\n return wrapped" }, { "identifier": "map_fn", "path": "utils/array_operations.py", "snippet": "def map_fn(batch, fn):\ndef to(data, device, non_blocking=True):\ndef set_requires_grad(nets, requires_grad=False):\ndef mask_mean(t: torch.Tensor, m: torch.Tensor, dim=None, keepdim=False):\ndef apply_crop(array, crop):\ndef shrink_mask(mask, shrink=3):\ndef get_mask(size, border=5, device=None):\ndef get_grid(H, W, normalize=True):\ndef detach(t):" }, { "identifier": "color_tensor", "path": "utils/plotting.py", "snippet": "def color_tensor(tensor: torch.Tensor, cmap, norm=False):\n if norm:\n tensor = (tensor - tensor.min()) / (tensor.max() - tensor.min())\n map = plt.cm.get_cmap(cmap)\n tensor = torch.tensor(map(tensor.cpu().numpy()), device=tensor.device)[..., :3]\n return tensor" }, { "identifier": "color_segmentation_tensor", "path": "utils/plotting.py", "snippet": "def color_segmentation_tensor(segmentation, n_classes=21):\n \"\"\"\n Transform a tensor of class indicies ranging from 0 to n_classes-1 into a rgb tensor\n (add another dimension to the end of size 3).\n \"\"\"\n # https://matplotlib.org/stable/gallery/color/colormap_reference.html\n palette = plt.cm.plasma(np.linspace(0, 1, n_classes))\n palette = palette[:, :3] # RGBA -> RGB\n\n segmentation = palette[segmentation.view(-1).cpu()].reshape(*segmentation.shape, 3)\n\n return segmentation" } ]
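The NeRFRenderer.composite snippet in the context above turns per-sample densities into pixel colours via standard alpha compositing. A minimal sketch of that weighting for a single ray, with made-up sample values and assumed near/far planes (not the repository's tensors), is:

import torch

z = torch.linspace(3.0, 80.0, 8)                     # sample depths along one ray (assumed near/far)
sigma = torch.rand(8)                                # densities predicted at those samples
rgb = torch.rand(8, 3)                               # colours predicted at those samples

deltas = torch.cat([z[1:] - z[:-1], torch.tensor([1e10])])         # distances between samples
alphas = 1 - torch.exp(-deltas * torch.relu(sigma))                # per-sample opacity
trans = torch.cumprod(torch.cat([torch.ones(1), 1 - alphas + 1e-10]), dim=0)[:-1]  # transmittance
weights = alphas * trans                                           # compositing weights, as in composite()
rgb_final = (weights.unsqueeze(-1) * rgb).sum(dim=0)               # (3,) pixel colour
depth_final = (weights * z).sum()                                  # expected depth along the ray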
import sys import matplotlib.pyplot as plt import torch from argparse import ArgumentParser from scripts.inference_setup import * from hydra import compose, initialize from omegaconf import OmegaConf from models.bts.model import BTSNet, ImageRaySampler from models.common.render import NeRFRenderer from utils.array_operations import map_fn, unsqueezer from utils.plotting import color_tensor, color_segmentation_tensor
13253
dtype=torch.float32).view(1, 4, 4) proj = torch.tensor([ [ 0.7849, 0.0000, -0.0312, 0], [ 0.0000, 2.9391, 0.2701, 0], [ 0.0000, 0.0000, 1.0000, 0], [ 0.0000, 0.0000, 0.0000, 1], ], dtype=torch.float32).view(1, 4, 4) elif model == "KITTI-Raw": resolution = (192, 640) config_path = "exp_kitti_raw" cp_path = Path(f"out/kitti_raw/pretrained") cp_name = cp_path.name cp_path = next(cp_path.glob("training*.pt")) out_path = Path(f"media/img_custom/kitti-raw_{cp_name}") cam_incl_adjust = None proj = torch.tensor([ [ 1.1619, 0.0000, -0.0184, 0], [ 0.0000, 3.8482, -0.0781, 0], [ 0.0000, 0.0000, 1.0000, 0], [ 0.0000, 0.0000, 0.0000, 1] ], dtype=torch.float32).view(1, 4, 4) elif model == "RealEstate10K": resolution = (256, 384) config_path = "exp_re10k" cp_path = Path(f"out/re10k/pretrained") cp_name = cp_path.name cp_path = next(cp_path.glob("training*.pt")) out_path = Path(f"media/img_custom/re10k_{cp_name}") cam_incl_adjust = None proj = torch.tensor([ [1.0056, 0.0000, 0.0000, 0], [0.0000, 1.7877, 0.0000, 0], [0.0000, 0.0000, 1.0000, 0], [0.0000, 0.0000, 0.0000, 1], ], dtype=torch.float32).view(1, 4, 4) else: raise ValueError(f"Invalid model: {model}") initialize(version_base=None, config_path="../../configs", job_name="gen_imgs") config = compose(config_name=config_path, overrides=[]) print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) config = dict(config) if "segmentation_mode" in config.keys(): config["model_conf"] = dict(config["model_conf"]) config["model_conf"]["segmentation_mode"] = config["segmentation_mode"] net = BTSNet(config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() renderer.renderer.n_coarse = 64 renderer.renderer.lindisp = True class _Wrapper(nn.Module): def __init__(self): super().__init__() self.renderer = renderer _wrapper = _Wrapper() _wrapper.load_state_dict(cp["model"], strict=True) renderer.to(device) renderer.eval() ray_sampler = ImageRaySampler(config["model_conf"]["z_near"], config["model_conf"]["z_far"], *resolution, norm_dir=False) print("Load input image") assert os.path.exists(args.img) img = cv2.cvtColor(cv2.imread(args.img), cv2.COLOR_BGR2RGB).astype(np.float32) / 255. img = cv2.resize(img, (resolution[1], resolution[0])) img = torch.tensor(img).permute(2, 0, 1).unsqueeze(0).unsqueeze(0).to(device) * 2 - 1 img_name = os.path.basename(args.img).split(".")[0] with torch.no_grad(): poses = torch.eye(4).view(1, 1, 4, 4).to(device) projs = proj.view(1, 1, 4, 4).to(device)[:, :, :3, :3] net.encode(img, projs, poses, ids_encoder=[0], ids_render=[0]) net.set_scale(0) img_save = img[0, 0].permute(1, 2, 0).cpu() * .5 + .5 _, depth = render_poses(renderer, ray_sampler, poses[:, :1], projs[:, :1]) if s_profile: profile = render_profile(net, cam_incl_adjust) else: profile = None if s_profile_seg: profile_seg = render_segmentation_profile(net, cam_incl_adjust) else: profile_seg = None if s_profile_depth: profile_depth = render_depth_profile(net, cam_incl_adjust) else: None depth = ((1 / depth - 1 / config["model_conf"]["z_far"]) / (1 / config["model_conf"]["z_near"] - 1 / config["model_conf"]["z_far"])).clamp(0, 1) print(f"Generated " + str(out_path / f"{img_name}")) if s_img: save_plot(img_save.numpy(), str(out_path / f"{img_name}_in.png"), dry_run=dry_run) if s_depth:
sys.path.append(".") def main(): parser = ArgumentParser("Generate density field from single image.") parser.add_argument("--img", "-i", required=True, help="Path to the image.") parser.add_argument("--plot", "-p", action="store_true", help="Plot rather than save images.") parser.add_argument("--model", "-m", help="Path to the model you want to use.", required=True) args = parser.parse_args() s_img = True s_depth = True s_profile = True s_seg = True s_profile_seg = True s_profile_depth = True dry_run = args.plot cp_path = Path(args.model) model = "KITTI-360" if model == "KITTI-360": resolution = (192, 640) config_path = "exp_kitti_360" cp_name = cp_path.name cp_path = next(cp_path.glob("training*.pt")) out_path = Path(f"media/img_custom/kitti-360_{cp_name}") cam_incl_adjust = torch.tensor( [[1.0000000, 0.0000000, 0.0000000, 0], [0.0000000, 0.9961947, -0.0871557, 0], [0.0000000, 0.0871557, 0.9961947, 0], [0.0000000, 000000000, 0.0000000, 1] ], dtype=torch.float32).view(1, 4, 4) proj = torch.tensor([ [ 0.7849, 0.0000, -0.0312, 0], [ 0.0000, 2.9391, 0.2701, 0], [ 0.0000, 0.0000, 1.0000, 0], [ 0.0000, 0.0000, 0.0000, 1], ], dtype=torch.float32).view(1, 4, 4) elif model == "KITTI-Raw": resolution = (192, 640) config_path = "exp_kitti_raw" cp_path = Path(f"out/kitti_raw/pretrained") cp_name = cp_path.name cp_path = next(cp_path.glob("training*.pt")) out_path = Path(f"media/img_custom/kitti-raw_{cp_name}") cam_incl_adjust = None proj = torch.tensor([ [ 1.1619, 0.0000, -0.0184, 0], [ 0.0000, 3.8482, -0.0781, 0], [ 0.0000, 0.0000, 1.0000, 0], [ 0.0000, 0.0000, 0.0000, 1] ], dtype=torch.float32).view(1, 4, 4) elif model == "RealEstate10K": resolution = (256, 384) config_path = "exp_re10k" cp_path = Path(f"out/re10k/pretrained") cp_name = cp_path.name cp_path = next(cp_path.glob("training*.pt")) out_path = Path(f"media/img_custom/re10k_{cp_name}") cam_incl_adjust = None proj = torch.tensor([ [1.0056, 0.0000, 0.0000, 0], [0.0000, 1.7877, 0.0000, 0], [0.0000, 0.0000, 1.0000, 0], [0.0000, 0.0000, 0.0000, 1], ], dtype=torch.float32).view(1, 4, 4) else: raise ValueError(f"Invalid model: {model}") initialize(version_base=None, config_path="../../configs", job_name="gen_imgs") config = compose(config_name=config_path, overrides=[]) print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) config = dict(config) if "segmentation_mode" in config.keys(): config["model_conf"] = dict(config["model_conf"]) config["model_conf"]["segmentation_mode"] = config["segmentation_mode"] net = BTSNet(config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() renderer.renderer.n_coarse = 64 renderer.renderer.lindisp = True class _Wrapper(nn.Module): def __init__(self): super().__init__() self.renderer = renderer _wrapper = _Wrapper() _wrapper.load_state_dict(cp["model"], strict=True) renderer.to(device) renderer.eval() ray_sampler = ImageRaySampler(config["model_conf"]["z_near"], config["model_conf"]["z_far"], *resolution, norm_dir=False) print("Load input image") assert os.path.exists(args.img) img = cv2.cvtColor(cv2.imread(args.img), cv2.COLOR_BGR2RGB).astype(np.float32) / 255. 
img = cv2.resize(img, (resolution[1], resolution[0])) img = torch.tensor(img).permute(2, 0, 1).unsqueeze(0).unsqueeze(0).to(device) * 2 - 1 img_name = os.path.basename(args.img).split(".")[0] with torch.no_grad(): poses = torch.eye(4).view(1, 1, 4, 4).to(device) projs = proj.view(1, 1, 4, 4).to(device)[:, :, :3, :3] net.encode(img, projs, poses, ids_encoder=[0], ids_render=[0]) net.set_scale(0) img_save = img[0, 0].permute(1, 2, 0).cpu() * .5 + .5 _, depth = render_poses(renderer, ray_sampler, poses[:, :1], projs[:, :1]) if s_profile: profile = render_profile(net, cam_incl_adjust) else: profile = None if s_profile_seg: profile_seg = render_segmentation_profile(net, cam_incl_adjust) else: profile_seg = None if s_profile_depth: profile_depth = render_depth_profile(net, cam_incl_adjust) else: None depth = ((1 / depth - 1 / config["model_conf"]["z_far"]) / (1 / config["model_conf"]["z_near"] - 1 / config["model_conf"]["z_far"])).clamp(0, 1) print(f"Generated " + str(out_path / f"{img_name}")) if s_img: save_plot(img_save.numpy(), str(out_path / f"{img_name}_in.png"), dry_run=dry_run) if s_depth:
save_plot(color_tensor(depth, "magma", norm=True).numpy(), str(out_path / f"{img_name}_depth.png"), dry_run=dry_run)
4
2023-11-12 21:53:27+00:00
16k
TCLResearchEurope/torch-dag
torch_dag_algorithms/pruning/module_multipliers.py
[ { "identifier": "structured_modules", "path": "torch_dag/structured_modules.py", "snippet": "ACTIVATION_MODULES_T = Union[\n nn.ReLU,\n nn.ReLU6,\n nn.SiLU,\n nn.Softmax,\n nn.Sigmoid,\n nn.Hardswish,\n nn.Hardsigmoid,\n nn.GELU,\n nn.LeakyReLU,\n nn.ELU,\n nn.Tanh,\n nn.Identity,\n]\nACTIVATION_MODULES = get_args(ACTIVATION_MODULES_T) # -ish...\n B, C, H, W = x.shape\n B, C, H, W = x.shape\n B, N, C = x.shape\n H = W = math.isqrt(N)\n B, N, C = x.shape\n SPECS = (\n '(B,C,H,W)->(B,H*W,C)',\n '(B,N,C)->(B,N,target)',\n )\n PREDECESSOR_KEYWORD = 'predecessor'\n B, C, H, W = x.shape\n B, N, C = x.shape\n SPECS = (\n '(B,C,H,W)->(B,H*W,C)',\n '(B,N,C)->(B,N,target)',\n )\n PREDECESSOR_KEYWORD = 'predecessor'\n B, C, H, W = x.shape\n B, N, C = x.shape\n B, C, H, W = x.shape\n B, C, H, W = x.shape\n B, N, C = x.shape\n H = W = math.isqrt(N)\n B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)\n N = int(h * scale_factor)\n B, T, C, _ = q.size()\n PREDECESSOR_KEYWORD = 'predecessor'\n B, N, C = x.shape\n B, N, C = x.shape\ndef space_to_depth(x: torch.Tensor, block_size: int):\ndef depth_to_space(x: torch.Tensor, block_size: int):\n def build_activation_module(cls, activation_name):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(\n self,\n dim: int,\n ):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, perm: Tuple[int, ...]):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, dim0: int, dim1: int):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n dim,\n keepdim: bool,\n ):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n block_size: int,\n ):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, inputs: torch.Tensor, target_shape=None):\n def __init__(\n self,\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, inputs: torch.Tensor, target_shape=None):\n def forward(self, inputs: torch.Tensor):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(\n self,\n index: Union[int, Tuple[int, ...]],\n ):\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]):\n def calc_same_pad(self, i: int, k: int, s: int, d: int) -> int:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, slice_spec):\n def replace_ellipses_by_slices(slice_spec):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, transpose: bool, normalize: bool = True):\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def __init__(self, num_channels: int, use_bias: bool, weight_init_value: float = 1e-5):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: Union[torch.Tensor, List[torch.Tensor]]) -> torch.Tensor:\n def __init__(self, bn: torch.nn.BatchNorm1d):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, scalar):\n def forward(self, x: 
torch.Tensor) -> torch.Tensor:\n def __init__(self, param: torch.nn.Parameter):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, p: str = 'fro', dim=None, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, dim, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, dim, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, split_size_or_sections, dim=0):\n def forward(self, x) -> List[torch.Tensor]:\n def __init__(\n self,\n spec: Union[str, Dict],\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def clear_custom_buffers(self):\n def forward(self, x) -> torch.Tensor:\n def __init__(\n self,\n spec: Union[str, Dict],\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, dim: int):\n def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, chunks, dim: int):\n def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, dim: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self):\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def __init__(self, start_dim: int = 0, end_dim: int = - 1):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, block_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, block_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(\n self,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n recompute_scale_factor=None,\n antialias=False,\n ):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, p=2.0, dim=1, ):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, pad: tuple, mode: str = 'constant', value: int = None):\n def forward(self, input):\n def __init__(self, ndim, bias):\n def forward(self, input):\n def forward(self, x):\n def forward(self, idx):\n def __init__(self, config):\n def forward(self, x):\n def __init__(self, scale_factor=2.0):\n def forward(self, x):\n def __init__(self,\n dim: int,\n num_heads: int,\n use_bias: bool = True,\n dropout_rate: float = 0.0,\n output_dropout_rate: float = 0.0,\n include_reshapings: bool = False,\n ):\n def forward(self, x: List[torch.Tensor]):\n def __init__(self, in_features: int, out_features):\n def forward(self, x):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def entropy_loss(self, epsilon: float = 0.01):\n def trainable_params(self):\n def fuse(self):\n def __init__(self, in_features: int, out_features, hidden_dim: int):\n def forward(self, x):\n def trainable_params(self):\n def __init__(self, in_features: int, out_features):\n def forward(self, x):\n def __init__(self, scale_factor: int, align_corners: bool = False):\n def forward(self, x):\n def __init__(\n self,\n num_heads: int,\n ):\n def forward(self, input: torch.Tensor):\n def __init__(\n self,\n num_heads: int,\n ):\n def forward(self, input: torch.Tensor):\n def __init__(self, in_features: int, out_features: int):\n def trainable_params(self):\n def non_logits_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01):\n def 
fuse(self):\n def __init__(self, in_features: int, out_features: int, bias: bool = True):\n def forward(self, x):\n def __init__(self, in_features: int, out_features: int):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01):\n def __init__(\n self,\n dim,\n num_ss_tokens: int,\n s_ratio: int = 4,\n use_bias: bool = True,\n activation=nn.ReLU(),\n ):\n def forward(self, x):\n def __init__(self, in_features: int, out_features: int, ks: int, padding, stride, bias: bool):\n def non_logits_params(self):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self) -> torch.Tensor:\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01) -> torch.Tensor:\n def fuse(self) -> Tuple[nn.Conv2d, nn.Conv2d]:\n def build_from_conv(cls, module: nn.Conv2d) -> \"DecomposedConv\":\n def __init__(self, in_features: int, out_features: int, bias: bool):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self) -> torch.Tensor:\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01) -> torch.Tensor:\n def fuse(self) -> Tuple[nn.Linear, nn.Linear]:\n def build_from_linear(cls, module: nn.Linear) -> \"DecomposedLinear\":\n def __init__(self, pow: Union[float, int]):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, dim: int):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, sizes: Union[torch.Size, int]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, dropout_p=0.0, is_causal: bool = False):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, function: Callable, spec: Dict):\n def _build_inputs(self, spec: Dict, inputs=None, inputs_counter: int = 0):\n def forward(self, inputs):\n def __init__(self, dim):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, arg):\n def forward(self, x):\n def __init__(self, shifts: Union[int, Tuple[int, ...]], dims: Union[int, Tuple[int, ...]] = None):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n index: Union[int, Tuple[int, ...]],\n ):\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n width_multiplier: float = 0.25,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_norm=False,\n attn_drop=0.,\n proj_drop=0.,\n norm_layer=nn.LayerNorm,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_norm=False,\n attn_drop=0.,\n proj_drop=0.,\n norm_layer=nn.LayerNorm,\n ):\n def forward(self, x):\n def convert_from_timm(cls, module: Attention):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n width_multiplier: float = 0.25,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n ):\n def forward(self, x):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n use_bias: bool = False,\n ):\n def rank(self) -> 
int:\n def s(self):\n def build_from_linear(cls, module: nn.Linear):\n def forward(self, x):\nclass ActivationModuleBuilder:\nclass EmptyModule(torch.nn.Module):\nclass AddModule(torch.nn.Module):\nclass SubModule(torch.nn.Module):\nclass MulModule(torch.nn.Module):\nclass DivModule(torch.nn.Module):\nclass ConcatModule(torch.nn.Module):\nclass PermuteModule(torch.nn.Module):\nclass TransposeModule(torch.nn.Module):\nclass GlobalMeanPool2DModule(torch.nn.Module):\nclass SpaceToDepthModule(torch.nn.Module):\nclass ReshapeModule(torch.nn.Module):\nclass ReshapeModuleV2(torch.nn.Module):\nclass PatchifyModule(torch.nn.Module):\nclass DePatchifyModule(torch.nn.Module):\nclass TensorMergerModule(torch.nn.Module):\nclass TensorExtractorModule(torch.nn.Module):\nclass Conv2DSameModule(torch.nn.Conv2d):\nclass SliceModule(torch.nn.Module):\nclass GetShapeModule(torch.nn.Module):\nclass GetShapeModuleV2(torch.nn.Module):\nclass TfMatmulModule(torch.nn.Module):\nclass MatmulModule(torch.nn.Module):\nclass ChannelAffineModule(torch.nn.Module):\nclass TfTokenizeModule(torch.nn.Module):\nclass TfDetokenizeModule(torch.nn.Module):\nclass TfBatchNorm1d(torch.nn.Module):\nclass ScalarMul(torch.nn.Module):\nclass ParameterModule(torch.nn.Module):\nclass NormModule(torch.nn.Module):\nclass MeanModule(torch.nn.Module):\nclass SumModule(torch.nn.Module):\nclass SplitModule(torch.nn.Module):\nclass ReshapeWithSpecModule(torch.nn.Module):\nclass ReshapeWithSpecModuleV2(torch.nn.Module):\nclass TokenizeModule(torch.nn.Module):\nclass DetokenizeModule(torch.nn.Module):\nclass UnbindModule(torch.nn.Module):\nclass ChunkModule(torch.nn.Module):\nclass AuxiliaryTokenModule(torch.nn.Module):\nclass ExpandAsModule(torch.nn.Module):\nclass FlattenModule(torch.nn.Module):\nclass DepthToSpaceModule(torch.nn.Module):\nclass SpaceToDepthModule(torch.nn.Module):\nclass InterpolateModule(torch.nn.Module):\nclass NormalizeModule(torch.nn.Module):\nclass PadModule(torch.nn.Module):\nclass LayerNormWithOptionalBias(nn.Module):\nclass GeluGPT(nn.Module):\nclass PositionalEmbeddingGPT(nn.Module):\nclass CausalSelfAttention(nn.Module):\nclass BilinearUpsampling(nn.Module):\nclass EfficientAttention(nn.Module):\nclass AdjustableQueryKeyMatmul(nn.Module):\nclass PreFusedAdjustableQueryKeyMatmul(nn.Module):\nclass FusedAdjustableQueryKeyMatmul(nn.Module):\nclass HalfPixelCentersFalseBilinearUpsample(nn.Module):\nclass MakeHeadsModule(torch.nn.Module):\nclass UnmakeHeadsModule(torch.nn.Module):\nclass SparseAdjustableLinear(nn.Module):\nclass SparseLinear(nn.Linear):\nclass DecomposedSparseLinear(nn.Module):\nclass StateSpaceAttentionV2(torch.nn.Module):\nclass DecomposedConv(nn.Module):\nclass DecomposedLinear(nn.Module):\nclass PowerModule(torch.nn.Module):\nclass UnsqueezeModule(torch.nn.Module):\nclass ExpandTokenModule(torch.nn.Module):\nclass AddcmulModule(torch.nn.Module):\nclass ScaledDotProductAttentionModule(torch.nn.Module):\nclass AutoWrapFunctionModule(torch.nn.Module):\nclass StackModule(torch.nn.Module):\nclass ArgModule(torch.nn.Module):\nclass RollModule(torch.nn.Module):\nclass GetItemModule(torch.nn.Module):\nclass StageZeroSllrcAttention(torch.nn.Module):\nclass Attention(nn.Module):\nclass BatchedAttention(torch.nn.Module):\nclass SllrcAttention(torch.nn.Module):\nclass MultiQueryAttention(torch.nn.Module):\nclass SvdLinear(nn.Module):" }, { "identifier": "DagModule", "path": "torch_dag/core/dag_module.py", "snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n 
vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVertex] = None,\n ):\n super().__init__()\n self.name = name\n self.vertices = vertices if vertices is not None else []\n self.output_vertex = output_vertex\n self.forward_dict = None\n self.inputs_dict = None\n self.cache_forward_dict = False\n self._inner_modules = None\n self.forward_scaffold = {}\n self.output_index = None\n self.compiled = False\n self.update_inner_modules()\n\n def compile(self, inputs: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None):\n \"\"\"\n In general `forward` method of DagModule is not `torch.compile` friendly. To overcome that\n we need to use a modified implementation of the forward pass, with no cacheing of intermediate tensors.\n Additionally, some modules may require a compile-type step for `torch.compile` usage.\n :param inputs: optional input (a dummy tensor for a single forward pass)\n \"\"\"\n if inputs is not None:\n is_training = self.training\n if is_training:\n self.eval()\n _ = self(inputs)\n if is_training:\n self.train()\n\n self.forward_scaffold, self.output_index = self.get_forward_scaffold()\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n v.module.compile()\n self.compiled = True\n\n @property\n def inner_modules(self) -> torch.nn.ModuleList:\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n return self._inner_modules\n\n @property\n def input_vertices(self) -> List[InputVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InputVertex)]\n\n @property\n def inner_vertices(self) -> List[InnerVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InnerVertex)]\n\n def update_inner_modules(self):\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n for iv in self.inner_vertices:\n if isinstance(iv.module, DagModule):\n iv.module.update_inner_modules()\n\n def get_vertex_by_name(self, name: str) -> Union[InnerVertex, InputVertex]:\n result = [vertex for vertex in self.vertices if vertex.name == name]\n if len(result) == 1:\n return result[0]\n elif len(result) > 1:\n raise AssertionError(f'Multiple vertices found with name: {name} -> {result}')\n else:\n return\n\n def get_forward_scaffold(self):\n # a mapping between vertices index and its predecessors indices\n forward_scaffold = {}\n for k, vertex in enumerate(self.vertices):\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n predecessors_indices = [\n self.vertices.index(pd) for pd in predecessors\n ]\n forward_scaffold[k] = predecessors_indices\n\n output_index = self.vertices.index(self.output_vertex)\n\n return forward_scaffold, output_index\n\n def compiled_forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n\n assert self.compiled\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_list = [None for _ in range(len(self.vertices))]\n\n for k, input in enumerate(inputs):\n forward_list[k] = input\n\n num_inputs = len(inputs)\n\n for k in range(len(self.vertices)):\n if k < num_inputs:\n pass\n else:\n\n pd_indices = self.forward_scaffold[k]\n module_inputs = [forward_list[pd_index] for pd_index in pd_indices]\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n try:\n result = 
self.vertices[k].module(module_inputs)\n except (TypeError, AttributeError):\n result = self.vertices[k].module(*module_inputs)\n result = _postprocess_module_output(result)\n\n forward_list[k] = result\n\n return forward_list[self.output_index]\n\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n # this is for `torch.compile` usage\n if self.compiled:\n return self.compiled_forward(inputs)\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_dict = {}\n for k, v in enumerate(self.input_vertices):\n forward_dict[v] = inputs[k]\n\n # forward_dict = {vertex: tensor for vertex, tensor in zip(self.input_vertices, inputs)}\n inputs_dict = {}\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n module_inputs = [forward_dict[pd] for pd in predecessors]\n inputs_dict[vertex] = module_inputs\n\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n\n try:\n result = vertex.module(module_inputs)\n except (TypeError, AttributeError):\n result = vertex.module(*module_inputs)\n # if isinstance(result, Tuple):\n result = _postprocess_module_output(result)\n\n forward_dict[vertex] = result\n if self.cache_forward_dict:\n self.forward_dict = forward_dict\n self.inputs_dict = inputs_dict\n return forward_dict[self.output_vertex]\n\n def traverse(\n self,\n processor: VertexProcessor = None,\n ):\n if processor is None:\n inner_vertices = []\n for inner_vertex in self.inner_vertices:\n if isinstance(inner_vertex.module, DagModule):\n inner_vertices.extend(inner_vertex.module.traverse())\n inner_vertices.append(inner_vertex)\n return inner_vertices\n else:\n for inner_vertex in self.traverse():\n processor(inner_vertex)\n # TODO: Remove after validation\n # self._update_inner_modules()\n\n def _check_if_name_unique(self, name: str):\n if name in [v.name for v in self.vertices]:\n raise ValueError(\n f'{self.name} already has an Vertex with name {name}. 
Please use different name.'\n )\n\n def add_input_vertex(self, name: str) -> InputVertex:\n self._check_if_name_unique(name)\n input_vertex = InputVertex(name)\n self.vertices.append(input_vertex)\n return input_vertex\n\n def add_vertex(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ) -> InnerVertex:\n self._check_if_name_unique(name)\n assert isinstance(module, torch.nn.Module)\n\n inner_vertex = InnerVertex(\n name=name,\n module=module,\n predecessors=predecessors,\n )\n for predecessor in predecessors:\n if predecessor not in self.vertices:\n raise ValueError(f'The predecessor: {predecessor} of InnerVertex: {InnerVertex} is not in '\n f'the DagModule: {self.name}')\n self.vertices.append(inner_vertex)\n self.inner_modules.append(module)\n inner_vertex.dag_module = self\n return inner_vertex\n\n def __repr__(self):\n representation = f'{self.__class__.__name__}[{self.name}]'\n if len(self.vertices) == 0:\n return representation\n for inner_vertex in self.inner_vertices:\n inner_vertex.MAX_LEN_REPR = self.MAX_LEN_REPR\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n representation += f'\\n << {vertex.name} '\n else:\n index = self.inner_vertices.index(vertex)\n prefix = '>>' if vertex == self.output_vertex else '*'\n if isinstance(vertex.module, DagModule):\n prefix += '#'\n representation += f\"\\n{prefix} {index}: {vertex} \" \\\n f\"--> predecessors: {vertex.predecessors}, \" \\\n f\"successors: {vertex.successors}\"\n representation += f' {self.add_custom_module_info(vertex)}'\n for vertex in self.inner_vertices:\n vertex.MAX_LEN_REPR = None\n return representation\n\n def add_custom_module_info(self, vertex: InnerVertex):\n m = vertex.module\n if isinstance(m, torch.nn.Conv2d):\n return f'Conv2d(in={m.in_channels}, out={m.out_channels}, ks={m.kernel_size}, padding={m.padding})'\n if isinstance(m, torch.nn.Linear):\n return f'Linear(in={m.in_features}, out={m.out_features})'\n return ''\n\n def mark_current_top_vertex_as_output(self):\n if not self.inner_vertices:\n raise ValueError(f'One cannot mark top node in an empty {self}')\n if self.output_vertex is not None:\n logger.warning(f'{self} already has an output vertex. 
Replacing...')\n self.output_vertex = self.inner_vertices[-1]\n\n @property\n def module_classes(self) -> Set:\n return set([m.__class__ for m in self.inner_modules])\n\n def unroll_inner_modules(self) -> List[torch.nn.Module]:\n result = []\n for m in self.inner_modules:\n if not isinstance(m, DagModule):\n result.append(m)\n else:\n result.extend(m.unroll_inner_modules())\n return result\n\n def save(self, path: str):\n # TODO: Remove after validation\n # self._update_inner_modules()\n self.enforce_names_uniqueness()\n os.makedirs(path, exist_ok=True)\n atomic_modules = self.unroll_inner_modules()\n self.clear_custom_buffers()\n torch.save(torch.nn.ModuleList(atomic_modules), os.path.join(path, 'modules.pt'))\n with open(os.path.join(path, 'config.dict.json'), 'w') as f:\n json.dump(self.config_dict(), f)\n\n def clear_custom_buffers(self):\n for module in self.unroll_inner_modules():\n if hasattr(module, 'clear_custom_buffers'):\n module._buffers.clear()\n\n @classmethod\n def load(\n cls,\n path: str,\n map_location='cpu',\n custom_module_classes: Tuple[Type[torch.nn.Module]] = (),\n ) -> \"DagModule\":\n \"\"\"\n\n :param path: directory from which model should be loaded\n :param map_location: defaults to `cpu`\n :param custom_module_classes: custom torch module classes needed for loading a `DagModule` that was built\n using these modules\n \"\"\"\n with open(os.path.join(path, 'config.dict.json'), 'r') as f:\n config_dict = json.load(f)\n m = torch.load(os.path.join(path, 'modules.pt'), map_location=map_location)\n return cls.load_from_config_dict_and_atomic_modules(\n config_dict=config_dict,\n atomic_modules=m\n )\n\n @classmethod\n def load_from_config_dict_and_atomic_modules(\n cls,\n config_dict: Dict,\n atomic_modules: List[torch.nn.Module]\n ) -> \"DagModule\":\n output_index = config_dict.pop('output_index')\n name = config_dict.pop('name')\n if 'class' in config_dict:\n class_name = config_dict.pop('class')\n else:\n class_name = cls.__name__\n dag = None\n if class_name == cls.__name__:\n dag = cls(name)\n for subclass in cls.__subclasses__():\n if subclass.__name__ == class_name:\n dag = subclass(name)\n\n if dag is None:\n raise NotImplementedError(f'There is no subclass with name: {class_name}.')\n\n for k, (key, config) in enumerate(config_dict.items()):\n if config['type'] == 'input':\n dag.add_input_vertex(name=config['name'])\n else:\n predecessors = [dag.vertices[index] for index in config['predecessor_indices']]\n if config['is_atomic']:\n module = atomic_modules[config['module_index']]\n else:\n module = cls.load_from_config_dict_and_atomic_modules(\n config_dict=config['module_dict'],\n atomic_modules=atomic_modules,\n )\n vertex = dag.add_vertex(\n name=config['name'],\n module=module,\n predecessors=predecessors,\n )\n orbit = config.get('orbit')\n if orbit:\n vertex.orbit = orbit\n if k == output_index:\n dag.output_vertex = vertex\n\n return dag\n\n def config_dict(self, atomic_modules: List[torch.nn.Module] = None) -> Dict:\n if atomic_modules is None:\n atomic_modules = self.unroll_inner_modules()\n config_dict = {}\n for k, vertex in enumerate(self.vertices):\n _config = vertex.config_dict(atomic_modules)\n config_dict[k] = _config\n\n config_dict['name'] = self.name\n config_dict['class'] = self.__class__.__name__\n config_dict['output_index'] = self.vertices.index(self.output_vertex)\n return config_dict\n\n def _get_inner_vertex_predecessor_indices(self, inner_vertex: InnerVertex) -> List[int]:\n result = [\n self.vertices.index(predecessor)\n for 
predecessor in inner_vertex.predecessors\n ]\n return result\n\n @property\n def flat(self) -> bool:\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n return False\n return True\n\n def flatten(self, input_shape_for_verification: Optional[Tuple[int, ...]] = None) -> \"DagModule\":\n \"\"\"\n This method will switch the `dag` to `eval` mode if `input_shape_for_verification` is provided.\n :param input_shape_for_verification:\n :return:\n \"\"\"\n dag_copy = deepcopy(self)\n if self.flat:\n return dag_copy\n\n if input_shape_for_verification:\n dag_copy.eval()\n x = torch.normal(mean=torch.zeros(size=input_shape_for_verification))\n reference_output = dag_copy(x)\n\n # builds a new cell (not in place flatten)\n dag = self.__class__(name=dag_copy.name, vertices=dag_copy.input_vertices)\n for v in dag_copy.inner_vertices:\n if not isinstance(v.module, DagModule):\n dag.vertices.append(v)\n v.dag_module = dag\n if v == dag_copy.output_vertex:\n dag.output_vertex = v\n else:\n inner_dag_predecessors = v.predecessors\n inner_dag_successors = v.successors\n inner_dag = v.module.flatten()\n for iv in inner_dag.inner_vertices:\n for pd in iv.predecessors: # remap predecessors where needed\n if isinstance(pd, InputVertex):\n pd_index_in_inner_dag = inner_dag.input_vertices.index(pd)\n index = iv.predecessors.index(pd)\n iv.predecessors[index] = inner_dag_predecessors[pd_index_in_inner_dag]\n if inner_dag.output_vertex == iv: # remap output of inner dag\n for suc in inner_dag_successors:\n index = suc.predecessors.index(v)\n suc.predecessors[index] = iv\n iv.dag_module = dag\n dag.vertices.append(iv)\n if v == dag_copy.output_vertex:\n dag.output_vertex = iv\n assert all([e in dag.vertices for e in iv.predecessors])\n\n if input_shape_for_verification:\n dag.eval()\n new_output = dag(x)\n assert torch.abs(reference_output - new_output).sum() == 0.0\n\n # TODO: Remove after validation\n # self._update_inner_modules()\n dag.enforce_names_uniqueness()\n\n return dag\n\n def enforce_names_uniqueness(self):\n names = [v.name for v in self.vertices]\n while len(names) != len(set(names)):\n names_counter = Counter()\n for v in self.vertices:\n name = v.name\n names_counter[name] += 1\n if names_counter[name] > 1:\n new_name = f'{name}_{names_counter[name] - 1}'\n logger.debug(f'Renaming: {name} -> {new_name}')\n v.name = new_name\n names = [v.name for v in self.vertices]\n\n def clear_tensor_dicts(self):\n self.forward_dict = None\n self.inputs_dict = None\n\n @property\n def device(self):\n # https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180/10\n # useful, but may be dangerous\n self.update_inner_modules()\n device_ = next(iter(self.parameters())).device\n if not all([p.device == device_ for p in self.parameters()]):\n raise AssertionError(f'Not all parameters of {self.name} are on the same device')\n return device_" }, { "identifier": "InputVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InputVertex(Vertex):\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n return {\n 'name': self.name,\n 'type': 'input',\n }" }, { "identifier": "InnerVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InnerVertex(Vertex):\n def __init__(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ):\n super().__init__(name=name)\n self._module = module\n self._predecessors = list(predecessors)\n self.dag_module: \"DagModule\" = None\n self.orbit = None\n\n @property\n def successors(self) -> List['InnerVertex']:\n 
if self.dag_module is None:\n logger.error(f'Trying to get successors of an InnerVertex that has not been assigned to any DagModule.')\n return [vertex for vertex in self.dag_module.inner_vertices if self in vertex.predecessors]\n\n @property\n def predecessors(self) -> List[Vertex]:\n return self._predecessors\n\n @property\n def predecessor_indices(self) -> List[Vertex]:\n return [self.dag_module.vertices.index(pd) for pd in self.predecessors]\n\n @predecessors.setter\n def predecessors(self, new_predecessors: List[Vertex]):\n if not isinstance(new_predecessors, list):\n logger.error(f'Predecessors is expected to be a list. Got {type(new_predecessors)} except.')\n self._predecessors = new_predecessors\n\n @property\n def module(self) -> torch.nn.Module:\n return self._module\n\n @module.setter\n def module(self, module: torch.nn.Module):\n self._module = module\n # TODO: Remove after validation\n self.dag_module.update_inner_modules()\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n is_atomic = not isinstance(self.module, DagModule)\n result = {\n 'name': self.name,\n 'predecessor_indices': self.predecessor_indices,\n 'is_atomic': is_atomic,\n 'type': 'inner',\n 'orbit': self.orbit,\n }\n if not is_atomic:\n result['module_dict'] = self.module.config_dict(atomic_modules)\n else:\n result['module_index'] = atomic_modules.index(self.module)\n return result" }, { "identifier": "Vertex", "path": "torch_dag/core/dag_module.py", "snippet": "class Vertex:\n MAX_LEN_REPR = None\n\n def __init__(self, name: str):\n self.name = name\n\n def __repr__(self):\n if self.MAX_LEN_REPR is not None and len(self.name) > self.MAX_LEN_REPR:\n return f'{self.name[:self.MAX_LEN_REPR // 2]}...{self.name[-self.MAX_LEN_REPR // 2:]}'\n return self.name\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n return {\n 'name': self.name,\n }" }, { "identifier": "PASS_THROUGH_CHANNELS_CLASSES", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "PASS_THROUGH_CHANNELS_CLASSES = (\n smodules.ChannelAffineModule,\n smodules.NormalizeModule,\n smodules.LayerNormWithOptionalBias,\n smodules.TfBatchNorm1d,\n nn.BatchNorm2d,\n nn.MaxPool2d,\n nn.AvgPool2d,\n nn.AdaptiveAvgPool2d,\n nn.Dropout,\n nn.Upsample,\n nn.LayerNorm,\n nn.BatchNorm1d,\n MaskModule,\n smodules.PowerModule,\n smodules.AddcmulModule,\n smodules.HalfPixelCentersFalseBilinearUpsample,\n smodules.BilinearUpsampling,\n smodules.PadModule,\n smodules.NormalizeModule,\n smodules.InterpolateModule,\n smodules.ScalarMul,\n smodules.MeanModule,\n\n)" }, { "identifier": "is_source", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_source(module: nn.Module):\n return is_linear_source(module) or is_conv_source(module)" }, { "identifier": "get_orbits_dict", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def get_orbits_dict(dag) -> Dict:\n all_orbit_modules = set([v.module.orbit for v in dag.inner_vertices if isinstance(v.module, MaskModule)])\n return {orbit.name: orbit for orbit in all_orbit_modules}" }, { "identifier": "is_linear_source", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_linear_source(module: nn.Module):\n if isinstance(module, nn.Linear):\n return True\n return False" }, { "identifier": "is_depthwise_conv", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_depthwise_conv(module: nn.Module) -> bool:\n return isinstance(module, (\n nn.Conv2d, nn.ConvTranspose2d)) and module.in_channels == module.groups and module.in_channels > 1" 
}, { "identifier": "OrbitModule", "path": "torch_dag_algorithms/pruning/modules.py", "snippet": "class OrbitModule(torch.nn.Module):\n\n def __init__(\n self,\n name: str,\n num_channels: int,\n distillation_mode: str = constants.PRUNING_DEFAULT_MODE_NAME,\n block_size: Optional[int] = None,\n indices_of_source_vertices=None,\n ):\n super().__init__()\n self.name = name\n self.num_channels = num_channels\n self.distillation_mode = distillation_mode\n self.block_size = block_size\n self._testing_logits = None\n self.conv1 = torch.nn.Conv2d(\n in_channels=num_channels, out_channels=num_channels, kernel_size=3, groups=num_channels)\n self.conv2 = torch.nn.Conv2d(\n in_channels=num_channels,\n out_channels=num_channels,\n kernel_size=1,\n )\n self._optionally_set_block_size_for_whole_block_pruning(distillation_mode=distillation_mode)\n self._validate_distilation_mode_and_block_size(distillation_mode=distillation_mode, block_size=block_size)\n self.bkd_masking_losses = {}\n self.indices_of_source_vertices = indices_of_source_vertices\n self.debug_logits = None\n\n def _validate_distilation_mode_and_block_size(self, distillation_mode: str, block_size: int):\n if distillation_mode not in PRUNING_MODES:\n raise NotImplementedError(f'Distillation mode: {distillation_mode} not supported')\n if distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME and block_size is None:\n raise AssertionError(f'In {constants.PRUNING_BLOCK_SNPE_MODE_NAME} pruning mode block size must not '\n f'be `None`.')\n\n def _optionally_set_block_size_for_whole_block_pruning(self, distillation_mode: str):\n if distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n self.block_size = self.num_channels\n\n @staticmethod\n def clip_logits(\n logits: torch.Tensor,\n clip_val=constants.MAX_LOGITS_ABS_VALUE,\n ) -> torch.Tensor:\n return torch.clip(logits, min=-clip_val, max=clip_val)\n\n @property\n def logits(self) -> torch.Tensor:\n # TODO This is a hack for testing, remove/refactor it\n if self.debug_logits is not None:\n return self.debug_logits\n kernel_size = self.conv1.kernel_size\n device = self.conv1.weight.device\n x = torch.ones(size=(1, self.num_channels, *kernel_size), device=device)\n x = self.conv1(x)\n x = self.conv2(x)\n x = (constants.INITIAL_LOGITS_VALUE_FOR_PRUNING + constants.SIMPLE_ORBIT_LOGITS_MULTIPLIER * x)\n return self.clip_logits(torch.mean(x, dim=(0, 2, 3), keepdim=False))\n\n def compute_average_number_of_output_channels(self):\n if self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n return torch.sigmoid(self.logits).sum()\n\n elif self.distillation_mode in (\n constants.PRUNING_BLOCK_SNPE_MODE_NAME, constants.PRUNING_WHOLE_BLOCK_MODE_NAME):\n split_list = get_split_list_of_logits(logits=self.logits, block_size=self.block_size)\n max_per_block_logits = get_sorted_per_block_max_logits(\n logits=self.logits,\n block_size=self.block_size,\n )\n num_channels = torch.stack(\n [float(block_size) * torch.sigmoid(max_logit) for \\\n block_size, max_logit in zip(split_list, max_per_block_logits)], dim=0).sum()\n return num_channels\n else:\n msg = f'Mode {self.distillation_mode} not implemented for average channels computation.'\n raise NotImplementedError(msg)\n\n def compute_output_channel_masks(\n self,\n predecessors_channel_masks: List[List[torch.Tensor]] = None,\n ) -> List[torch.Tensor]:\n predecessors_channel_masks = [mask_list for mask_list in predecessors_channel_masks if mask_list is not None]\n logits = self.logits\n num_logits = int(logits.shape[0])\n if 
self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n scores_ = torch.where(\n logits > 0.0,\n 1,\n 0,\n )\n elif self.distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 1:\n scores_ = np.ones(shape=(self.block_size,), dtype=np.int32)\n else:\n scores_ = np.zeros(shape=(self.block_size,), dtype=np.int32)\n\n elif self.distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 0:\n # removing whole orbit\n scores_ = np.zeros(shape=(self.num_channels,), dtype=np.int32)\n\n else:\n # compute block indices that are left\n sorted_logits = torch.sort(logits, descending=True)[0]\n split_list = get_split_list_of_logits(logits=logits, block_size=self.block_size)\n split_sorted_logits = list(torch.split(sorted_logits, split_list))\n residual = num_logits % self.block_size\n if residual != 0:\n logits_fake_tail = split_sorted_logits[-1].mean() * torch.ones(\n size=(self.block_size - residual,))\n split_sorted_logits[-1] = torch.cat([split_sorted_logits[-1], logits_fake_tail], dim=0)\n split_sorted_logits = [e.detach().numpy() for e in split_sorted_logits]\n if len(split_sorted_logits) == 1:\n res = split_sorted_logits\n else:\n res = np.take(\n split_sorted_logits,\n axis=0,\n indices=indices_of_blocks_to_leave,\n )\n threshold_value = torch.tensor(res).min()\n scores_ = np.where(\n logits >= threshold_value,\n 1,\n 0,\n )\n else:\n raise NotImplementedError\n\n if len(predecessors_channel_masks) == 0:\n return [torch.tensor(scores_)]\n else:\n return [torch.tensor(np.where(\n predecessors_channel_masks[0][0].sum() == 0,\n np.array([0] * self.num_channels, dtype=np.int32),\n scores_,\n ))]\n\n def sample(self):\n return sample_from_logits(logits=self.logits)" }, { "identifier": "compute_timm_average_num_channels", "path": "torch_dag_timm_plugin/module_multipliers.py", "snippet": "@singledispatch\ndef compute_timm_average_num_channels(\n module: torch.nn.Module,\n vertex: InnerVertex,\n average_number_input_channels: List[List[torch.Tensor]],\n orbits_dict: Dict[str, OrbitModule],\n forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]]\n) -> Union[List[torch.Tensor], None]:\n raise NotImplementedError" }, { "identifier": "CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES", "path": "torch_dag_timm_plugin/module_multipliers.py", "snippet": "CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES = ()" } ]
import logging import torch from typing import List, Tuple, Dict, Union from torch_dag import structured_modules as smodules from torch_dag.core.dag_module import DagModule from torch_dag.core.dag_module import InputVertex, InnerVertex, Vertex from torch_dag_algorithms.pruning.commons import PASS_THROUGH_CHANNELS_CLASSES from torch_dag_algorithms.pruning.commons import is_source, get_orbits_dict, is_linear_source, is_depthwise_conv from torch_dag_algorithms.pruning.modules import OrbitModule from torch_dag_timm_plugin.module_multipliers import compute_timm_average_num_channels, \ CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES
11,830
# # Copyright © TCL Research Europe. All rights reserved. # logger = logging.getLogger(__name__) PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES def shape_to_float(shape, device, dim=1): return torch.tensor(shape[dim], device=device).to(torch.float32) def compute_elementwise_op_average_channels(average_number_input_channels: List[List[torch.Tensor]], ): average_number_input_channels = [e for e in average_number_input_channels if e is not None] if len(average_number_input_channels) == 0: return None return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))] def compute_average_num_channels( vertex: InnerVertex, average_number_input_channels: List[List[torch.Tensor]], orbits_dict: Dict[str, OrbitModule], forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]] ) -> Union[List[torch.Tensor], None]: device = forward_dict[vertex.dag_module.input_vertices[0]].device if isinstance(vertex.module, PASS_THROUGH_MULTIPLIER_CLASSES): return [average_number_input_channels[0][0]] if is_source(vertex.module): if vertex.orbit is not None: orbit_module = orbits_dict[vertex.orbit] return [orbit_module.compute_average_number_of_output_channels()] else: if is_linear_source(vertex.module): return [shape_to_float(forward_dict[vertex].shape, dim=-1, device=device)] else: return [shape_to_float(forward_dict[vertex].shape, device=device)]
# # Copyright © TCL Research Europe. All rights reserved. # logger = logging.getLogger(__name__) PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES def shape_to_float(shape, device, dim=1): return torch.tensor(shape[dim], device=device).to(torch.float32) def compute_elementwise_op_average_channels(average_number_input_channels: List[List[torch.Tensor]], ): average_number_input_channels = [e for e in average_number_input_channels if e is not None] if len(average_number_input_channels) == 0: return None return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))] def compute_average_num_channels( vertex: InnerVertex, average_number_input_channels: List[List[torch.Tensor]], orbits_dict: Dict[str, OrbitModule], forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]] ) -> Union[List[torch.Tensor], None]: device = forward_dict[vertex.dag_module.input_vertices[0]].device if isinstance(vertex.module, PASS_THROUGH_MULTIPLIER_CLASSES): return [average_number_input_channels[0][0]] if is_source(vertex.module): if vertex.orbit is not None: orbit_module = orbits_dict[vertex.orbit] return [orbit_module.compute_average_number_of_output_channels()] else: if is_linear_source(vertex.module): return [shape_to_float(forward_dict[vertex].shape, dim=-1, device=device)] else: return [shape_to_float(forward_dict[vertex].shape, device=device)]
elif is_depthwise_conv(vertex.module):
9
2023-11-17 15:36:44+00:00
16k
newcastleuniversity/DISPEL
dispel/providers/generic/preprocessing.py
[ { "identifier": "Level", "path": "dispel/data/levels.py", "snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == \"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return 
self._epochs[\"epoch\"].tolist()\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": "DEFAULT_COLUMNS", "path": "dispel/data/raw.py", "snippet": "DEFAULT_COLUMNS = list(\"xyz\")" }, { "identifier": "GRAVITY_COLUMNS", "path": "dispel/data/raw.py", "snippet": "GRAVITY_COLUMNS = [f\"gravity{x}\" for x in \"XYZ\"]" }, { "identifier": "ProcessingStep", "path": "dispel/processing/core.py", "snippet": "class ProcessingStep:\n r\"\"\"A processing step in a processing sequence.\n\n :class:`ProcessingStep` is the basic entity through which\n :class:`~dispel.data.core.Reading`\\ s are processed. The processing step's\n :meth:`process_reading` function is called with the reading and additional arguments\n passed to :func:`process`. Results from the process step are expected to be an\n instance of :class:`ProcessingResult`. For a comprehensive description see\n :ref:`measure-extraction`.\n\n The method :meth:`flag_reading` can be overwritten to ensure that the reading\n about to be processed is valid, and return\n :class:`~dispel.data.flags.Flag`\\ s if that is not the case.\n\n Examples\n --------\n .. testsetup:: processing-step\n\n >>> import pandas as pd\n >>> import numpy as np\n\n >>> from dispel.data.core import Reading\n >>> from dispel.data.levels import Level\n >>> from dispel.data.raw import (RawDataSet, RawDataSetDefinition,\n ... RawDataValueDefinition)\n\n >>> reading = Reading(\n ... evaluation=None,\n ... levels=[\n ... Level(id_='my-level', start=0, end=1, raw_data_sets=[\n ... RawDataSet(\n ... RawDataSetDefinition('my-data-set', None, [\n ... RawDataValueDefinition('dummy', 'dummy')\n ... ]),\n ... pd.DataFrame({'dummy': list(range(6))})\n ... )\n ... ])\n ... ])\n\n .. doctest:: processing-step\n\n >>> from dispel.data.measures import MeasureValue\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing import process\n >>> from dispel.processing.core import ProcessingResult, ProcessingStep\n >>> class MyStep(ProcessingStep):\n ... def process_reading(self, reading, **kwargs):\n ... level = reading.get_level('my-level')\n ... raw_data_set = level.get_raw_data_set('my-data-set')\n ... data = raw_data_set.data\n ... yield ProcessingResult(\n ... step=self,\n ... 
sources=raw_data_set,\n ... result=MeasureValue(\n ... ValueDefinition('my-measure-id','max value'),\n ... data.max().max()\n ... )\n ... )\n >>> _ = process(reading, MyStep())\n >>> reading.measure_set.get_raw_value('my-measure-id')\n 5\n \"\"\"\n\n def __init__(self):\n self.predecessor = None\n self.successor = None\n\n def process(self, reading: Reading, **kwargs) -> ProcessResultType:\n \"\"\"Check reading for validity and process it.\n\n Parameters\n ----------\n reading\n The reading to be processed\n kwargs\n Additional arguments passed by :func:`process`.\n\n Yields\n ------\n ProcessResultType\n The results from processing readings.\n \"\"\"\n for flag in self.flag_reading(reading, **kwargs):\n yield ProcessingControlResult.from_flag(\n flag=flag,\n step=self,\n targets=self.get_reading_flag_targets(reading, **kwargs),\n )\n try:\n self.assert_valid_reading(reading, **kwargs)\n except AssertionError as error:\n yield ProcessingControlResult.from_assertion_error(step=self, error=error)\n else:\n yield from self.process_reading(reading, **kwargs)\n\n def assert_valid_reading(self, reading: Reading, **kwargs):\n \"\"\"Assert that reading is valid.\"\"\"\n\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n \"\"\"Flag the provided reading.\n\n Parameters\n ----------\n reading\n The reading to be flagged.\n kwargs\n Additional arguments passed by :func:`~dispel.processing.process`.\n\n Yields\n ------\n Flag\n The resulted flags.\n \"\"\"\n # pylint: disable=unused-argument\n yield from []\n\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n \"\"\"Get the reading flag targets.\n\n Parameters\n ----------\n reading\n The reading that is concerned with flagging.\n kwargs\n Additional keyword arguments eventually used for flag targets\n extraction.\n\n Returns\n -------\n Iterable[EntityType]\n An iterable of entities that are flagged.\n \"\"\"\n # pylint: disable=unused-argument\n return [reading]\n\n @abstractmethod\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n \"\"\"Process the provided reading.\n\n Parameters\n ----------\n reading\n The reading to be processed\n kwargs\n Additional arguments passed by :func:`~dispel.processing.process`.\n\n Yields\n ------\n ProcessResultType\n The results from processing readings.\n \"\"\"\n yield NotImplemented\n\n def set_previous(self, step: \"ProcessingStep\"):\n \"\"\"Set the previous step in a processing chain of this step.\"\"\"\n if self.predecessor is not None:\n warnings.warn(\n \"Changing predecessors can lead to side-effects. Previous predecessor \"\n f\"was {self.predecessor}\",\n UserWarning,\n )\n self.predecessor = step\n\n def set_next(self, step: \"ProcessingStep\"):\n \"\"\"Set the next step in a processing chain of this step.\"\"\"\n if self.successor is not None:\n warnings.warn(\n \"Changing successors can lead to side-effects. 
Previous successor was \"\n f\"{self.successor}\",\n UserWarning,\n )\n self.successor = step\n\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n \"\"\"Chain this step with the successor step.\"\"\"\n assert isinstance(successor, ProcessingStep), \"Can only chain processing steps\"\n\n self.set_next(successor)\n successor.set_previous(self)\n return _ChainedProcesses([self, successor])\n\n def __and__(self, other):\n \"\"\"See :meth:`ProcessingStep.chain`.\"\"\"\n return self.chain(other)\n\n def get_parameters(self) -> List[Tuple[str, Parameter]]:\n \"\"\"Get all parameters defined by the processing step.\n\n Returns\n -------\n List[Tuple[str, Parameter]]\n A list of tuples of parameter name and :class:`Parameter`\n objects defined by the processing step.\n \"\"\"\n return inspect.getmembers(self, lambda x: isinstance(x, Parameter))" }, { "identifier": "DefaultLevelFilter", "path": "dispel/processing/level.py", "snippet": "class LevelProcessingResultBase:\nclass LevelProcessingResult(ProcessingResult, LevelProcessingResultBase):\nclass LevelProcessingControlResult(ProcessingControlResult, LevelProcessingResultBase):\nclass LevelFilter(ABC):\nclass LevelIdFilter(LevelFilter):\nclass DefaultLevelFilter(LevelFilter):\nclass LevelProcessingStepProtocol(metaclass=ABCMeta):\nclass LevelFilterProcessingStepMixin:\nclass LevelProcessingStep(\n LevelProcessingStepProtocol, LevelFilterProcessingStepMixin, ProcessingStep\n):\nclass FlagLevelStep(FlagStepMixin, LevelProcessingStep):\nclass ProcessingStepGroup(LevelFilterProcessingStepMixin, CoreProcessingStepGroup):\n def __post_init__(self):\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\ndef _intersection(a, b):\ndef _union(a, b):\n def __call__(self, levels: Iterable[Level]) -> Set[Level]:\n def __repr__(self) -> str:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def _combined(\n self, other: \"LevelFilter\", func: Callable[[Set, Set], Set]\n ) -> \"LevelFilter\":\n def _match(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __and__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __or__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __invert__(self) -> \"LevelFilter\":\n def _inverted_filter(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __init__(self, level_ids: MultipleLevelIdsType):\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(self, *args, **kwargs):\n def get_level_filter(self) -> LevelFilter:\n def set_level_filter(self, level_filter: LevelFilterType):\n def inject_level_filter_from_step(self, step: \"LevelFilterProcessingStepMixin\"):\n def _get_level_filter(inner_self) -> LevelFilter:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def flag_level(\n self, level: Level, 
reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(\n self,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def set_steps(self, steps: List[ProcessingStep]):\n def inject_level_filter_from_step(self, step: LevelFilterProcessingStepMixin):" }, { "identifier": "LimbModality", "path": "dispel/processing/modalities.py", "snippet": "class LimbModality(AVEnum):\n \"\"\"Type of limb exercises enumerator.\"\"\"\n\n UPPER_LIMB = (\"upper limb\", \"upper\")\n LOWER_LIMB = (\"lower limb\", \"lower\")" }, { "identifier": "SensorModality", "path": "dispel/processing/modalities.py", "snippet": "class SensorModality(AVEnum):\n # FIXME remove class\n \"\"\"Sensor types enumerator.\"\"\"\n\n def unit(self, order: int = 1) -> str:\n \"\"\"Get the unit of the sensor signal.\n\n Parameters\n ----------\n order\n The unit order.\n\n Returns\n -------\n str\n The unit of the sensor.\n \"\"\"\n basis = {\"acc\": \"G\", \"gyr\": \"rad/s\", \"itrem\": \"pixel\"}[self.abbr]\n if order == 1:\n return basis\n return \"/\".join([x + f\"^{order}\" for x in basis.split(\"/\")])\n\n ACCELEROMETER = (\"accelerometer\", \"acc\")\n GYROSCOPE = (\"gyroscope\", \"gyr\")\n INTENTIONAL = (\"intentional tremors\", \"itrem\")" }, { "identifier": "Apply", "path": "dispel/processing/transform.py", "snippet": "class Apply(TransformStep):\n r\"\"\"Apply a method onto columns of a raw data set.\n\n Parameters\n ----------\n data_set_id\n The data set id of the data set on which the method is to be applied\n method\n The method in question. This can be any method that accepts a pandas series and\n returns an array of same length. See also :meth:`pandas.DataFrame.apply`.\n method_kwargs\n Optional arguments required for the methods.\n columns\n The columns to be considered during the method application.\n drop_nan\n ```True`` if NaN values are to be droped after transformation.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels\n to be transformed. If no filter is provided, all levels will be transformed. 
The\n ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\\ s\n and lists of either and passes them to a\n :class:`~dispel.processing.level.LevelIdFilter` for convenience.\n new_data_set_id\n The ``id`` used for the :class:`~dispel.data.raw.RawDataSetDefinition`.\n\n Examples\n --------\n Assuming you want to low-pass filter your gyroscope data of a ``reading`` you can\n create the following step to do so (note that the filtering expects a\n time-index-based and constant frequency-based data frame, so you might have to\n leverage :class:`~dispel.providers.generic.sensor.SetTimestampIndex` and\n :class:`~dispel.providers.generic.sensor.Resample` first):\n\n >>> from dispel.processing.transform import Apply\n >>> from dispel.signal.filter import butterworth_low_pass_filter\n >>> step = Apply(\n ... 'gyroscope_ts_resampled',\n ... butterworth_low_pass_filter,\n ... dict(cutoff=1.5, order=2),\n ... list('xyz'),\n ... )\n\n This step will apply a 2. order butterworth low pass filter to the columns ``x``,\n ``y``, and ``z`` with a cut-off frequency of 1.5Hz.\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n method: Callable[..., Any],\n method_kwargs: Optional[Dict[str, Any]] = None,\n columns: Optional[List[str]] = None,\n new_data_set_id: Optional[str] = None,\n drop_nan: Optional[bool] = False,\n level_filter: Optional[LevelFilterType] = None,\n ):\n method_kwargs = method_kwargs or {}\n columns = columns or DEFAULT_COLUMNS\n\n def _transform_function(data: pd.DataFrame) -> pd.DataFrame:\n res = data[columns].apply(method, **method_kwargs)\n if drop_nan:\n return res.dropna()\n return res\n\n def _definition_factory(column: str) -> RawDataValueDefinition:\n return RawDataValueDefinition(\n column, f\"{method.__name__} applied on {column}\"\n )\n\n super().__init__(\n data_set_id,\n _transform_function,\n new_data_set_id or f\"{data_set_id}_{method.__name__}\",\n [_definition_factory(column) for column in columns],\n level_filter=level_filter,\n )" }, { "identifier": "ComputeGravityRotationMatrices", "path": "dispel/providers/generic/sensor.py", "snippet": "class ComputeGravityRotationMatrices(TransformStep):\n r\"\"\"Compute a series of rotation matrices to align sensors to gravity.\n\n This transformation step creates a series of rotation matrices based on the\n gravity information contained in the accelerometer sensor. This allows to\n rotate other sensors on a desired orientation related to gravity. This is\n in particular of interest if we want to measure physical interactions with\n devices around the plane perpendicular to gravity.\n\n Parameters\n ----------\n target_gravity\n The target gravity vector, e.g. ``(-1, 0, 0)`` to create rotation\n matrices that rotate the x-axis of a device onto gravity.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. 
The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n def __init__(\n self, data_set_id: str, target_gravity: Tuple[float, float, float], **kwargs\n ):\n def _transform_function(data: pd.DataFrame) -> pd.Series:\n return compute_rotation_matrices_quaternion(\n data[GRAVITY_COLUMNS], target_gravity\n )\n\n super().__init__(\n data_set_id,\n _transform_function,\n \"gravity_rotation_matrices\",\n [RawDataValueDefinition(\"rotation_matrix\", \"Rotation Matrix\")],\n **kwargs,\n )" }, { "identifier": "Resample", "path": "dispel/providers/generic/sensor.py", "snippet": "class Resample(NotEmptyDataSetAssertionMixin, TransformStep):\n r\"\"\"Resample a time-based raw data set to a specific sampling frequency.\n\n The resampling creates a new raw data set which is accessible via the\n data set comprised of the original one concatenated with ``_resampled``.\n\n Parameters\n ----------\n data_set_id\n The data set to be resampled. This has to be a data set that uses a\n time-based index. You might first have to apply the\n :class:`SetTimestampIndex` processing step before you can apply\n this step.\n aggregations\n A list of resampling methods to be applied in order. Each can be any\n method that is also accepted by :meth:`pandas.DataFrame.agg`.\n columns\n The columns to be considered during the resampling.\n freq\n The frequency to resample to. See also\n :meth:`pandas.DataFrame.resample` for details. If freq is not provided\n the frequency is estimated automatically taking the median frequency.\n max_frequency_distance\n An optional integer specifying the maximum accepted\n distance between the expected frequency and the estimated frequency\n above which we raise an error.\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. 
The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n aggregations: Iterable[str],\n columns: Iterable[str],\n freq: Optional[Union[float, str]] = None,\n max_frequency_distance: Optional[int] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def _resample(\n data: pd.DataFrame, sampling_frequency: Optional[Union[float, str]] = None\n ) -> pd.DataFrame:\n # Check if a sampling frequency is provided\n # If not, we discretized the sampling frequency\n if sampling_frequency is None:\n discretize_args = [data, VALID_FREQ_LIST]\n if max_frequency_distance:\n discretize_args.append(max_frequency_distance)\n sampling_frequency = discretize_sampling_frequency(*discretize_args)\n # Convert the float sampling frequency to a Timedelta format\n if not isinstance(sampling_frequency, str):\n sampling_frequency = pd.Timedelta(1 / sampling_frequency, unit=\"s\")\n resample_obj = data[columns].resample(sampling_frequency)\n for method in aggregations:\n resample_obj = resample_obj.agg(method)\n return resample_obj\n\n def _definition_factory(column: str) -> RawDataValueDefinition:\n return RawDataValueDefinition(\n column, f\"{column} resampled with {aggregations}\"\n )\n\n super().__init__(\n data_set_id,\n partial(_resample, sampling_frequency=freq),\n f\"{data_set_id}_resampled\",\n [_definition_factory(column) for column in columns],\n level_filter=level_filter,\n )" }, { "identifier": "RotateSensorWithGravityRotationMatrices", "path": "dispel/providers/generic/sensor.py", "snippet": "class RotateSensorWithGravityRotationMatrices(TransformStep):\n r\"\"\"Apply a series of rotation matrices to a sensor.\n\n This is a complementary step to :class:`ComputeGravityRotationMatrices` and\n applies the rotation matrices to the specified sensor.\n\n Parameters\n ----------\n data_set_id\n The id of the sensor data set to be rotated.\n columns\n The columns of the sensor data set to be considered in the rotation.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n\n Examples\n --------\n Assuming you want to rotate the gyroscope vector onto gravity you can\n achieve this by chaining the following steps:\n\n .. doctest:: processing\n\n >>> from dispel.data.raw import DEFAULT_COLUMNS\n >>> from dispel.processing import process\n >>> from dispel.providers.generic.sensor import (\n ... ComputeGravityRotationMatrices,\n ... RotateSensorWithGravityRotationMatrices\n ... )\n >>> cols = DEFAULT_COLUMNS\n >>> steps = [\n ... ComputeGravityRotationMatrices('accelerometer', (-1, 0, 0)),\n ... RotateSensorWithGravityRotationMatrices('gyroscope', cols)\n ... ]\n >>> _ = process(reading, steps) # doctest: +SKIP\n\n The results of the roation are available in the raw data set with the id\n ``<data_set_id>_rotated``:\n\n .. 
doctest:: processing\n :options: +NORMALIZE_WHITESPACE\n\n >>> level = reading.get_level(level_id) # doctest: +SKIP\n >>> level.get_raw_data_set('gyroscope').data.head() # doctest: +SKIP\n x y z ts\n 0 0.035728 -0.021515 0.014879 2020-05-04 17:31:38.574\n 1 -0.012046 0.005010 -0.009029 2020-05-04 17:31:38.625\n 2 0.006779 0.000761 -0.003253 2020-05-04 17:31:38.680\n 3 0.032636 -0.020272 -0.021915 2020-05-04 17:31:38.729\n 4 0.007495 -0.014061 0.012886 2020-05-04 17:31:38.779\n >>> level.get_raw_data_set(\n ... 'gyroscope_rotated'\n ... ).data.head() # doctest: +SKIP\n x y z\n 0 -0.002309 -0.042509 -0.012182\n 1 -0.003754 0.014983 0.003624\n 2 -0.002237 -0.002116 -0.006901\n 3 -0.030461 -0.021654 -0.023656\n 4 0.001203 -0.019580 0.005924\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n columns: Iterable[str],\n level_filter: Optional[LevelFilterType] = None,\n ):\n def _transform_function(\n sensor_df: pd.DataFrame, matrices: pd.DataFrame\n ) -> pd.DataFrame:\n return apply_rotation_matrices(\n matrices[\"rotation_matrix\"], sensor_df[columns]\n )\n\n def _definition_factory(column: str) -> RawDataValueDefinition:\n return RawDataValueDefinition(column, f\"{column} rotated\")\n\n super().__init__(\n [data_set_id, \"gravity_rotation_matrices\"],\n _transform_function,\n f\"{data_set_id}_rotated\",\n [_definition_factory(column) for column in columns],\n level_filter=level_filter,\n )" }, { "identifier": "SetTimestampIndex", "path": "dispel/providers/generic/sensor.py", "snippet": "class SetTimestampIndex(TransformStep):\n r\"\"\"Create a new time series based on a date time or time delta column.\n\n Parameters\n ----------\n data_set_id\n The data set id of the time series to be transformed.\n columns\n The columns to consider in the new raw data set.\n time_stamp_column\n The time series column name to use as index.\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n duplicates\n The strategy used to handle duplicates.\n Has to be one of ``ignore``, ``raise``, ``first``, ``last``.\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n columns: List[str],\n time_stamp_column: str = \"ts\",\n level_filter: Optional[LevelFilterType] = None,\n duplicates: Optional[str] = None,\n ):\n def _transform_function(\n data: pd.DataFrame, rm_duplicate: Optional[str]\n ) -> pd.DataFrame:\n if rm_duplicate is None:\n return data.set_index(time_stamp_column)[columns].copy()\n res = data.set_index(time_stamp_column)[columns].copy()\n return res[~res.index.duplicated(keep=duplicates)]\n\n super().__init__(\n data_set_id,\n lambda x: _transform_function(x, duplicates),\n f\"{data_set_id}_ts\",\n [RawDataValueDefinition(column, column) for column in columns],\n level_filter=level_filter,\n )" }, { "identifier": "TransformGyroscope", "path": "dispel/providers/generic/sensor.py", "snippet": "class TransformGyroscope(TransformStep):\n r\"\"\"Format gyroscope data to ADS format if not already the case.\n\n On ADS format, the gyroscope is synchronized with the accelerometer. Here\n we make sure gyroscope is synchronized with the acc data set.\n\n Parameters\n ----------\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. 
If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n data_set_ids = [\"acc\", \"gyroscope\"]\n new_data_set_id = \"gyroscope\"\n\n definitions = [\n RawDataValueDefinition(\n axis, f\"Rotation speed along the {axis} axis.\", data_type=\"float\"\n )\n for axis in \"xyz\"\n ] + [RawDataValueDefinition(\"ts\", \"time index\")]\n\n @staticmethod\n @transformation\n def _synchronize_gyroscope(\n accelerometer: pd.DataFrame, gyroscope: pd.DataFrame, reading: Reading\n ) -> pd.DataFrame:\n if isinstance(reading, BDHReading):\n # Merging on the timestamps vs. on the indexes\n acc_renamed = accelerometer.rename(\n mapper={\n \"x\": \"userAccelerationX\",\n \"y\": \"userAccelerationY\",\n \"z\": \"userAccelerationZ\",\n },\n axis=1,\n )\n return pd.merge_asof(acc_renamed, gyroscope, on=\"ts\", direction=\"nearest\")[\n [\"ts\", \"x\", \"y\", \"z\"]\n ]\n return gyroscope" }, { "identifier": "TransformUserAcceleration", "path": "dispel/providers/generic/sensor.py", "snippet": "class TransformUserAcceleration(TransformStep):\n r\"\"\"Format accelerometer data to ADS format if not already the case.\n\n Prior to formatting, linear acceleration and gravity are decoupled\n from acceleration.\n\n Parameters\n ----------\n level_filter\n An optional :class:`dispel.processing.level.LevelFilter` to determine the\n levels to be transformed. If no filter is provided, all levels will be\n transformed. The ``level_filter`` also accepts :class:`str`,\n :class:`~dispel.data.core.LevelId`\\ s and lists of either and passes them\n to a :class:`~dispel.processing.level.LevelFilter` for convenience.\n \"\"\"\n\n data_set_ids = \"accelerometer\"\n new_data_set_id = \"acc\"\n\n definitions = (\n [\n RawDataValueDefinition(\n f\"userAcceleration{axis}\",\n f\"Linear Acceleration along the {axis} axis.\",\n data_type=\"float\",\n )\n for axis in \"XYZ\"\n ]\n + [\n RawDataValueDefinition(\n f\"gravity{axis}\",\n f\"gravity component along the {axis} axis.\",\n data_type=\"float\",\n )\n for axis in \"XYZ\"\n ]\n + [RawDataValueDefinition(\"ts\", \"time index\")]\n )\n\n @staticmethod\n def add_gravity(\n accelerometer: pd.DataFrame,\n level: Level,\n gravity: Optional[pd.DataFrame] = None,\n ) -> pd.DataFrame:\n \"\"\"Format gravity data to ADS format.\"\"\"\n if gravity is None:\n cols = [\"x\", \"y\", \"z\"]\n raw_acc = level.get_raw_data_set(\"raw_accelerometer\").data\n accelerometer = raw_acc\n if level.has_raw_data_set(\"attitude\"):\n ori = level.get_raw_data_set(\"attitude\").data\n ori_cols = [\"w\", \"x\", \"y\", \"z\"]\n lin_accelerometer, gravity = remove_gravity_component_ori(\n accelerometer[cols].values, ori[ori_cols].values\n )\n lin_accelerometer = pd.DataFrame(lin_accelerometer, columns=cols)\n gravity = pd.DataFrame(gravity, columns=cols)\n else:\n lin_accelerometer, gravity = remove_gravity_component(\n accelerometer[cols]\n )\n\n res = pd.DataFrame(\n {\n \"userAccelerationX\": lin_accelerometer[\"x\"],\n \"userAccelerationY\": lin_accelerometer[\"y\"],\n \"userAccelerationZ\": lin_accelerometer[\"z\"],\n }\n )\n res[\"gravityX\"] = gravity[\"x\"]\n res[\"gravityY\"] = gravity[\"y\"]\n res[\"gravityZ\"] = gravity[\"z\"]\n res[\"ts\"] = accelerometer[\"ts\"]\n else:\n # Merging on the timestamps vs. 
on the indexes\n acc_renamed = accelerometer.rename(\n mapper={\n \"x\": \"userAccelerationX\",\n \"y\": \"userAccelerationY\",\n \"z\": \"userAccelerationZ\",\n },\n axis=1,\n )\n gravity_renamed = gravity.rename(\n mapper={\"x\": \"gravityX\", \"y\": \"gravityY\", \"z\": \"gravityZ\"}, axis=1\n )\n merged = acc_renamed.merge(gravity_renamed, how=\"outer\")\n merged = merged.set_index(\"ts\")\n merged_sorted = merged.sort_index()\n merged_sorted_interpolated = merged_sorted.interpolate(\n method=\"nearest\", limit_direction=\"both\"\n )\n res = merged_sorted_interpolated.loc[acc_renamed.ts].reset_index()\n return res.dropna()\n\n @staticmethod\n @transformation\n def _reformat(accelerometer: pd.DataFrame, level: Level) -> pd.DataFrame:\n target_cols = {\n f\"{sensor}{axis}\"\n for sensor in (\"userAcceleration\", \"gravity\")\n for axis in \"XYZ\"\n }\n if not target_cols.issubset(accelerometer.columns):\n try:\n return TransformUserAcceleration.add_gravity(\n accelerometer, level, level.get_raw_data_set(\"gravity\").data\n )\n except ValueError:\n # Happens in BDH pinch\n return TransformUserAcceleration.add_gravity(accelerometer, level)\n return accelerometer" }, { "identifier": "butterworth_high_pass_filter", "path": "dispel/signal/filter.py", "snippet": "def butterworth_high_pass_filter(\n data: pd.Series,\n cutoff: float,\n order: int = 2,\n freq: Optional[float] = None,\n zero_phase: Optional[bool] = True,\n) -> pd.Series:\n \"\"\"Filter a series with a butterworth high-pass filter.\n\n Parameters\n ----------\n data\n The time series to be filtered\n cutoff\n The upper bound of frequencies to filter\n freq\n The sampling frequency of the time series in Hz. If the passed ``data`` has\n an evenly spaced time series index it will be determined automatically.\n order\n The order of the filter\n zero_phase\n Boolean indicating whether zero phase filter (filtfilt) to be used\n\n Returns\n -------\n pandas.Series\n The filtered ``data``.\n \"\"\"\n return _butterworth_filter(data, \"high\", cutoff, order, freq, zero_phase)" }, { "identifier": "savgol_filter", "path": "dispel/signal/filter.py", "snippet": "def savgol_filter(data: pd.Series, window: int = 41, order: int = 3) -> pd.Series:\n \"\"\"Apply the Savitzky-Golay filter on a class:`~pandas.Series`.\n\n Parameters\n ----------\n data\n Input data\n window\n the length of the filter window\n order\n The order of the polynomial used to fit the samples\n\n Returns\n -------\n pandas.Series\n Filtered data\n \"\"\"\n # apply filter.\n res = pd.Series(\n signal.savgol_filter(data, window, order), index=data.index, name=data.name\n )\n return res" }, { "identifier": "check_amplitude", "path": "dispel/signal/sensor.py", "snippet": "def check_amplitude(\n data: pd.DataFrame, min_amplitude: float, max_amplitude: float\n) -> bool:\n \"\"\"Check if the signal amplitudes belong to a reasonable range.\n\n The function will return true only if all the values of each column are between the\n min and max amplitude bounds.\n\n Parameters\n ----------\n data\n A data frame containing one column or more. The data contains in columns must\n all have the same nature as the bounds are applied on the entire data frame.\n min_amplitude\n The expected min amplitude.\n max_amplitude\n The expected max amplitude.\n\n Returns\n -------\n bool\n ``True`` if all the values are in the range. 
``False`` otherwise.\n \"\"\"\n amplitude = data.max() - data.min()\n return amplitude.between(left=min_amplitude, right=max_amplitude).all()" }, { "identifier": "detrend_signal", "path": "dispel/signal/sensor.py", "snippet": "def detrend_signal(signal: pd.Series) -> pd.Series:\n \"\"\"Detrend signal and remove offset component.\n\n The final signal will end up centered on zero and stationary. This function is based\n on :func:`scipy.stats.linregress`.\n\n Parameters\n ----------\n signal: pandas.Series\n The raw signal.\n\n Returns\n -------\n pandas.Series\n The detrended signal.\n \"\"\"\n original_x = signal.index.to_numpy(float)\n signal_without_na = signal.dropna()\n y = signal_without_na.to_numpy(float)\n x = signal_without_na.index.to_numpy(float)\n (\n slope,\n intercept,\n *_,\n ) = stats.linregress(x, y)\n y_estimate = slope * original_x + intercept\n return signal - y_estimate" } ]
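The last two context snippets above (check_amplitude and detrend_signal) are small pandas helpers. A minimal usage sketch, assuming dispel, numpy, and pandas are installed; the synthetic series and the amplitude bounds are illustrative only, not values from the record:

import numpy as np
import pandas as pd

from dispel.signal.sensor import check_amplitude, detrend_signal

# A drifting sine wave standing in for one raw accelerometer axis.
t = np.arange(0.0, 10.0, 0.01)
raw = pd.Series(np.sin(2 * np.pi * t) + 0.1 * t, index=t)

detrended = detrend_signal(raw)  # removes the linear drift and the offset
in_range = check_amplitude(pd.DataFrame({"x": detrended}), 0.1, 4.0)
print(in_range)  # True: the peak-to-peak amplitude of the detrended sine is about 2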
from typing import Iterable, List, Optional, Set, Tuple from dispel.data.levels import Level from dispel.data.raw import DEFAULT_COLUMNS, GRAVITY_COLUMNS from dispel.processing import ProcessingStep from dispel.processing.level import ( DefaultLevelFilter, LevelFilter, LevelFilterType, LevelIdFilter, ProcessingStepGroup, ) from dispel.processing.modalities import LimbModality, SensorModality from dispel.processing.transform import Apply from dispel.providers.generic.sensor import ( ComputeGravityRotationMatrices, Resample, RotateSensorWithGravityRotationMatrices, SetTimestampIndex, TransformGyroscope, TransformUserAcceleration, ) from dispel.signal.filter import butterworth_high_pass_filter, savgol_filter from dispel.signal.sensor import check_amplitude, detrend_signal
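The import block above pulls in the two signal filters the module uses later (savgol_filter for sensor noise, butterworth_high_pass_filter for physiological noise). A minimal sketch of calling them directly, assuming dispel is installed; the 1000-sample series, 100 Hz sampling frequency, cut-off, and window are illustrative values, not taken from the record:

import numpy as np
import pandas as pd

from dispel.signal.filter import butterworth_high_pass_filter, savgol_filter

x = pd.Series(np.sin(np.linspace(0.0, 20.0 * np.pi, 1000)))  # ~10 sine periods

smoothed = savgol_filter(x, window=41, order=3)  # Savitzky-Golay smoothing
highpassed = butterworth_high_pass_filter(smoothed, cutoff=0.3, order=2, freq=100.0)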
12329
max_amplitude: float, min_amplitude: float, columns: Optional[List[str]] = None, ): self.data_set_id = data_set_id self.columns = columns self.max_amplitude = max_amplitude self.min_amplitude = min_amplitude def repr(self): """Get representation of the filter.""" return f"only {self.data_set_id} signal with acceptable amplitude>" def filter(self, levels: Iterable[Level]) -> Set[Level]: """Filter levels with acceptable signal amplitude.""" def _amplitude_filter(level: Level): if level.has_raw_data_set(self.data_set_id): data = level.get_raw_data_set(self.data_set_id).data if self.columns: data = data[self.columns] return check_amplitude(data, self.min_amplitude, self.max_amplitude) return True return set(filter(_amplitude_filter, levels)) class RotateFrame(ProcessingStepGroup): r"""A changing referential preprocessing step according a given data set. Parameters ---------- data_set_id The data set id on which the transformation is to be performed. gravity_data_set_id The dataset id containing the gravity components. frame The new desired frame. columns The columns onto which the resampling steps have to be applied. kwargs Additional arguments that are passed to the :meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This allows to provide additional values, such as placeholder values in value definitions to the actual processing function. """ def __init__( self, data_set_id: str, gravity_data_set_id: str, frame: Tuple[int, int, int], columns: Optional[List[str]] = None, **kwargs, ): columns = columns or DEFAULT_COLUMNS steps: List[ProcessingStep] = [ ComputeGravityRotationMatrices( gravity_data_set_id, frame, storage_error="ignore" ), RotateSensorWithGravityRotationMatrices( data_set_id, columns, ), ] super().__init__( steps, **kwargs, ) class PreprocessingSteps(ProcessingStepGroup): r"""A changing referential preprocessing step according a given data set. Parameters ---------- data_set_id The data set id on which the transformation is to be performed. limb The modality regarding if the exercise is upper or lower limb. sensor The modality regarding the type of sensor either accelerometer or gyroscope. resample_freq Optionally, the frequency to which resample the data during the resample step. columns Optionally, the columns on which the preprocessing steps need to be applied. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelIdFilter` for convenience. """ def __init__( self, data_set_id: str, limb: LimbModality, sensor: SensorModality, resample_freq: Optional[float] = None, columns: Optional[List[str]] = None, level_filter: LevelFilterType = DefaultLevelFilter(), ): columns = columns or DEFAULT_COLUMNS extra_columns = [] if not isinstance(level_filter, LevelFilter): level_filter = LevelIdFilter(level_filter) # Need to be computed even if only gyroscope signals are preprocessed to make # sure `acc` data set is available to compute gravity rotation matrices steps: List[ProcessingStep] = [ TransformUserAcceleration(storage_error="ignore"), TransformGyroscope(storage_error="overwrite"), ] if sensor == SensorModality.ACCELEROMETER: data_set_id = "acc"
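The cropped code above ends inside the accelerometer branch of PreprocessingSteps; the AmplitudeRangeFilter defined just before it is a LevelFilter intended to be handed to processing steps via their level_filter argument. A hedged sketch of that wiring, reusing the Apply step and detrend_signal from the module's imports; the data set ids, bounds, and columns are illustrative assumptions, and AmplitudeRangeFilter is assumed to be importable from the module shown:

from dispel.processing.transform import Apply
from dispel.signal.sensor import detrend_signal
# AmplitudeRangeFilter is defined in the module shown above (import path assumed).

acc_filter = AmplitudeRangeFilter(
    data_set_id="acc_ts",  # illustrative data set id
    max_amplitude=2.0,     # illustrative bounds
    min_amplitude=1e-4,
    columns=["userAccelerationX", "userAccelerationY", "userAccelerationZ"],
)

detrend_step = Apply(
    "acc_ts_resampled",    # illustrative data set id
    detrend_signal,
    columns=["userAccelerationX", "userAccelerationY", "userAccelerationZ"],
    level_filter=acc_filter,  # only levels passing the amplitude check are processed
)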
"""Core functionalities to preprocess signal data.""" class FilterSensorNoise(Apply): r"""Apply a filter that will remove any sensor noise into a given dataset. This filter is a Savitzky-Golay one. Parameters ---------- data_set_id The data set id on which the transformation is to be performed ('accelerometer', 'gyroscope'). columns The columns onto which the filtering step has to be applied. kwargs Additional arguments that are passed to the :meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This allows to provide additional values, such as placeholder values in value definitions to the actual processing function. Notes ----- The Savitzky-Golay is tuned as in [Martinez et. al. 2012]_ to remove sensor noise and to smooth the signal. The windows size is thus set up to 41 points and the filter is of order-3. """ def __init__(self, data_set_id: str, columns: Optional[List[str]] = None, **kwargs): columns = columns or DEFAULT_COLUMNS super().__init__( data_set_id=data_set_id, method=savgol_filter, method_kwargs=dict(window=41, order=3), columns=columns, new_data_set_id=f"{data_set_id}_svgf", drop_nan=True, **kwargs, ) class FilterPhysiologicalNoise(Apply): r"""Apply a filter that will remove any physiological noise into a dataset. This filter is a butterworth high-pass one. Parameters ---------- data_set_id The data set id on which the transformation is to be performed ('accelerometer', 'gyroscope'). columns The columns onto which the filtering step has to be applied. sampling_frequency Optional the initial sampling frequency. kwargs Additional arguments that are passed to the :meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This allows to provide additional values, such as placeholder values in value definitions to the actual processing function. Notes ----- The Butterwoth highpass filter is tuned as in [Martinez et. al. 2012]_ to remove physiological noise. The cut-off of is of 0.2HZ which is the standard breath frequency. .. [Martinez et. al. 2012] MARTINEZ-MENDEZ, Rigoberto, SEKINE, Masaki, et TAMURA, Toshiyo. Postural sway parameters using a triaxial accelerometer: comparing elderly and young healthy adults. Computer methods in biomechanics and biomedical engineering, 2012, vol. 15, no 9, p. 899-910. """ def __init__( self, data_set_id: str, columns: Optional[List[str]] = None, sampling_frequency: Optional[float] = None, **kwargs, ): columns = columns or DEFAULT_COLUMNS super().__init__( data_set_id=data_set_id, method=butterworth_high_pass_filter, method_kwargs=dict( order=2, cutoff=0.3, freq=sampling_frequency, zero_phase=True ), columns=columns, new_data_set_id=f"{data_set_id}_bhpf", drop_nan=True, **kwargs, ) class Detrend(Apply): r"""A detrending preprocessing step according a given data set. Parameters ---------- data_set_id The data set id on which the transformation is to be performed ('accelerometer', 'gyroscope'). columns The columns onto which the detrending steps have to be applied. kwargs Additional arguments that are passed to the :meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This allows to provide additional values, such as placeholder values in value definitions to the actual processing function. 
""" def __init__(self, data_set_id: str, columns: Optional[List[str]] = None, **kwargs): columns = columns or DEFAULT_COLUMNS super().__init__( data_set_id=data_set_id, method=detrend_signal, columns=columns, new_data_set_id=f"{data_set_id}_detrend", drop_nan=True, **kwargs, ) class AmplitudeRangeFilter(LevelFilter): r"""Filter aberrant signal amplitude. Parameters ---------- data_set_id The data set id on which the transformation is to be performed ('accelerometer', 'gyroscope'). max_amplitude A float which is the maximum expected amplitude values. min_amplitude A float which is the minimum expected amplitude values. columns The columns onto which the detrending steps have to be applied. """ def __init__( self, data_set_id: str, max_amplitude: float, min_amplitude: float, columns: Optional[List[str]] = None, ): self.data_set_id = data_set_id self.columns = columns self.max_amplitude = max_amplitude self.min_amplitude = min_amplitude def repr(self): """Get representation of the filter.""" return f"only {self.data_set_id} signal with acceptable amplitude>" def filter(self, levels: Iterable[Level]) -> Set[Level]: """Filter levels with acceptable signal amplitude.""" def _amplitude_filter(level: Level): if level.has_raw_data_set(self.data_set_id): data = level.get_raw_data_set(self.data_set_id).data if self.columns: data = data[self.columns] return check_amplitude(data, self.min_amplitude, self.max_amplitude) return True return set(filter(_amplitude_filter, levels)) class RotateFrame(ProcessingStepGroup): r"""A changing referential preprocessing step according a given data set. Parameters ---------- data_set_id The data set id on which the transformation is to be performed. gravity_data_set_id The dataset id containing the gravity components. frame The new desired frame. columns The columns onto which the resampling steps have to be applied. kwargs Additional arguments that are passed to the :meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This allows to provide additional values, such as placeholder values in value definitions to the actual processing function. """ def __init__( self, data_set_id: str, gravity_data_set_id: str, frame: Tuple[int, int, int], columns: Optional[List[str]] = None, **kwargs, ): columns = columns or DEFAULT_COLUMNS steps: List[ProcessingStep] = [ ComputeGravityRotationMatrices( gravity_data_set_id, frame, storage_error="ignore" ), RotateSensorWithGravityRotationMatrices( data_set_id, columns, ), ] super().__init__( steps, **kwargs, ) class PreprocessingSteps(ProcessingStepGroup): r"""A changing referential preprocessing step according a given data set. Parameters ---------- data_set_id The data set id on which the transformation is to be performed. limb The modality regarding if the exercise is upper or lower limb. sensor The modality regarding the type of sensor either accelerometer or gyroscope. resample_freq Optionally, the frequency to which resample the data during the resample step. columns Optionally, the columns on which the preprocessing steps need to be applied. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelIdFilter` for convenience. 
""" def __init__( self, data_set_id: str, limb: LimbModality, sensor: SensorModality, resample_freq: Optional[float] = None, columns: Optional[List[str]] = None, level_filter: LevelFilterType = DefaultLevelFilter(), ): columns = columns or DEFAULT_COLUMNS extra_columns = [] if not isinstance(level_filter, LevelFilter): level_filter = LevelIdFilter(level_filter) # Need to be computed even if only gyroscope signals are preprocessed to make # sure `acc` data set is available to compute gravity rotation matrices steps: List[ProcessingStep] = [ TransformUserAcceleration(storage_error="ignore"), TransformGyroscope(storage_error="overwrite"), ] if sensor == SensorModality.ACCELEROMETER: data_set_id = "acc"
extra_columns = GRAVITY_COLUMNS
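The expected continuation extends the accelerometer branch with the gravity channels; per the dispel.data.raw snippet earlier in the record, that constant expands to the three gravity axis names:

GRAVITY_COLUMNS = [f"gravity{x}" for x in "XYZ"]
# -> ['gravityX', 'gravityY', 'gravityZ']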
2
2023-11-14 10:06:46+00:00
16k
believethehype/nostrdvm
nostr_dvm/dvm.py
[ { "identifier": "EventDefinitions", "path": "nostr_dvm/utils/definitions.py", "snippet": "class EventDefinitions:\n KIND_DM = 4\n KIND_ZAP = 9735\n KIND_ANNOUNCEMENT = 31990\n KIND_NIP94_METADATA = 1063\n KIND_FEEDBACK = 7000\n KIND_NIP90_EXTRACT_TEXT = 5000\n KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000\n KIND_NIP90_SUMMARIZE_TEXT = 5001\n KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000\n KIND_NIP90_TRANSLATE_TEXT = 5002\n KIND_NIP90_RESULT_TRANSLATE_TEXT = KIND_NIP90_TRANSLATE_TEXT + 1000\n KIND_NIP90_GENERATE_TEXT = 5050\n KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000\n KIND_NIP90_GENERATE_IMAGE = 5100\n KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000\n KIND_NIP90_CONVERT_VIDEO = 5200\n KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000\n KIND_NIP90_GENERATE_VIDEO = 5202\n KIND_NIP90_TEXT_TO_SPEECH = 5250\n KIND_NIP90_RESULT_TEXT_TO_SPEECH = KIND_NIP90_TEXT_TO_SPEECH + 1000\n KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000\n KIND_NIP90_CONTENT_DISCOVERY = 5300\n KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000\n KIND_NIP90_PEOPLE_DISCOVERY = 5301\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000\n KIND_NIP90_CONTENT_SEARCH = 5302\n KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000\n KIND_NIP90_GENERIC = 5999\n KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000\n ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,\n KIND_NIP90_RESULT_SUMMARIZE_TEXT,\n KIND_NIP90_RESULT_TRANSLATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_IMAGE,\n KIND_NIP90_CONTENT_DISCOVERY,\n KIND_NIP90_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_CONVERT_VIDEO,\n KIND_NIP90_RESULT_CONTENT_DISCOVERY,\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_GENERATE_VIDEO,\n KIND_NIP90_RESULT_GENERIC]" }, { "identifier": "RequiredJobToWatch", "path": "nostr_dvm/utils/definitions.py", "snippet": "class RequiredJobToWatch:\n event: Event\n timestamp: int" }, { "identifier": "JobToWatch", "path": "nostr_dvm/utils/definitions.py", "snippet": "class JobToWatch:\n event: str\n timestamp: int\n is_paid: bool\n amount: int\n status: str\n result: str\n is_processed: bool\n bolt11: str\n payment_hash: str\n expires: int" }, { "identifier": "DVMConfig", "path": "nostr_dvm/utils/dvmconfig.py", "snippet": "class DVMConfig:\n SUPPORTED_DVMS = []\n PRIVATE_KEY: str = \"\"\n PUBLIC_KEY: str = \"\"\n FIX_COST: float = None\n PER_UNIT_COST: float = None\n\n RELAY_LIST = [\"wss://relay.damus.io\", \"wss://nostr-pub.wellorder.net\", \"wss://nos.lol\", \"wss://nostr.wine\",\n \"wss://nostr.mom\", \"wss://nostr.oxtr.dev\", \"wss://relay.nostr.bg\",\n \"wss://relay.f7z.io\", \"wss://pablof7z.nostr1.com\", \"wss://relay.nostr.net\", \"wss://140.f7z.io\",\n \"wss://relay.snort.social\", \"wss://offchain.pub/\", \"wss://relay.nostr.band\"]\n\n RELAY_TIMEOUT = 5\n EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external\n LNBITS_INVOICE_KEY = '' # Will all automatically generated by default, or read from .env\n LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.\n LNBITS_URL = 'https://lnbits.com'\n LN_ADDRESS = ''\n SCRIPT = ''\n IDENTIFIER = ''\n USE_OWN_VENV = True # Make an own venv for each dvm's process function.Disable if you want to install packages into main venv. 
Only recommended if you dont want to run dvms with different dependency versions\n DB: str\n NEW_USER_BALANCE: int = 0 # Free credits for new users\n NIP89: NIP89Config\n SHOW_RESULT_BEFORE_PAYMENT: bool = False # if this is true show results even when not paid right after autoprocess" }, { "identifier": "admin_make_database_updates", "path": "nostr_dvm/utils/admin_utils.py", "snippet": "def admin_make_database_updates(adminconfig: AdminConfig = None, dvmconfig: DVMConfig = None, client: Client = None):\n # This is called on start of Server, Admin function to manually whitelist/blacklist/add balance/delete users\n if adminconfig is None or dvmconfig is None:\n return\n\n if not isinstance(adminconfig, AdminConfig):\n return\n\n if ((\n adminconfig.WHITELISTUSER is True or adminconfig.UNWHITELISTUSER is True or adminconfig.BLACKLISTUSER is True or adminconfig.DELETEUSER is True)\n and adminconfig.USERNPUB == \"\"):\n return\n\n if adminconfig.UPDATE_PROFILE and (dvmconfig.NIP89 is None):\n return\n\n if adminconfig.DELETE_NIP89 and (adminconfig.EVENTID == \"\" or adminconfig.EVENTID == \"\"):\n return\n\n db = dvmconfig.DB\n\n if str(adminconfig.USERNPUB).startswith(\"npub\"):\n publickey = PublicKey.from_bech32(adminconfig.USERNPUB).to_hex()\n else:\n publickey = adminconfig.USERNPUB\n\n if adminconfig.WHITELISTUSER:\n user = get_or_add_user(db, publickey, client=client, config=dvmconfig)\n update_sql_table(db, user.npub, user.balance, True, False, user.nip05, user.lud16, user.name, user.lastactive)\n user = get_from_sql_table(db, publickey)\n print(str(user.name) + \" is whitelisted: \" + str(user.iswhitelisted))\n\n if adminconfig.UNWHITELISTUSER:\n user = get_from_sql_table(db, publickey)\n update_sql_table(db, user.npub, user.balance, False, False, user.nip05, user.lud16, user.name, user.lastactive)\n\n if adminconfig.BLACKLISTUSER:\n user = get_from_sql_table(db, publickey)\n update_sql_table(db, user.npub, user.balance, False, True, user.nip05, user.lud16, user.name, user.lastactive)\n\n if adminconfig.DELETEUSER:\n delete_from_sql_table(db, publickey)\n\n if adminconfig.ClEANDB:\n clean_db(db)\n\n if adminconfig.LISTDATABASE:\n list_db(db)\n\n if adminconfig.REBROADCAST_NIP89:\n nip89_announce_tasks(dvmconfig, client=client)\n\n if adminconfig.DELETE_NIP89:\n event_id = adminconfig.EVENTID\n keys = Keys.from_sk_str(\n adminconfig.PRIVKEY) # Private key from sender of Event (e.g. the key of an nip89 announcement you want to delete)\n fetch_nip89_paramters_for_deletion(keys, event_id, client, dvmconfig)\n\n if adminconfig.UPDATE_PROFILE:\n update_profile(dvmconfig, client, lud16=adminconfig.LUD16)" }, { "identifier": "AdminConfig", "path": "nostr_dvm/utils/admin_utils.py", "snippet": "class AdminConfig:\n REBROADCAST_NIP89: bool = False\n UPDATE_PROFILE: bool = False\n DELETE_NIP89: bool = False\n WHITELISTUSER: bool = False\n UNWHITELISTUSER: bool = False\n BLACKLISTUSER: bool = False\n DELETEUSER: bool = False\n LISTDATABASE: bool = False\n ClEANDB: bool = False\n\n USERNPUB: str = \"\"\n LUD16: str = \"\"\n\n EVENTID: str = \"\"\n PRIVKEY: str = \"\"" }, { "identifier": "get_amount_per_task", "path": "nostr_dvm/utils/backend_utils.py", "snippet": "def get_amount_per_task(task, dvm_config, duration=1):\n # duration is either static 1 (for images etc) or in seconds by default (e.g. 
audio/video)\n for dvm in dvm_config.SUPPORTED_DVMS: # this is currently just one\n if dvm.TASK == task:\n amount = dvm.FIX_COST + (dvm.PER_UNIT_COST * duration)\n return amount\n else:\n print(\"[\" + dvm_config.SUPPORTED_DVMS[\n 0].NAME + \"] Task \" + task + \" is currently not supported by this instance, skipping\")\n return None" }, { "identifier": "check_task_is_supported", "path": "nostr_dvm/utils/backend_utils.py", "snippet": "def check_task_is_supported(event: Event, client, config=None):\n try:\n dvm_config = config\n # Check for generic issues, event maformed, referenced event not found etc..\n if not is_input_supported_generic(event.tags(), client, dvm_config):\n return False, \"\"\n\n # See if current dvm supports the task\n task = get_task(event, client=client, dvm_config=dvm_config)\n if task not in (x.TASK for x in dvm_config.SUPPORTED_DVMS):\n return False, task\n # See if current dvm can handle input for given task\n for dvm in dvm_config.SUPPORTED_DVMS:\n if dvm.TASK == task:\n if not dvm.is_input_supported(event.tags(), client, config):\n return False, task\n return True, task\n\n\n except Exception as e:\n print(\"Check task: \" + str(e))" }, { "identifier": "get_task", "path": "nostr_dvm/utils/backend_utils.py", "snippet": "def get_task(event, client, dvm_config):\n try:\n if event.kind() == EventDefinitions.KIND_NIP90_GENERIC: # use this for events that have no id yet, inclufr j tag\n for tag in event.tags():\n if tag.as_vec()[0] == 'j':\n return tag.as_vec()[1]\n else:\n return \"unknown job: \" + event.as_json()\n elif event.kind() == EventDefinitions.KIND_DM: # dm\n for tag in event.tags():\n if tag.as_vec()[0] == 'j':\n return tag.as_vec()[1]\n else:\n return \"unknown job: \" + event.as_json()\n\n # This looks a bit more complicated, but we do several tasks for text-extraction in the future\n elif event.kind() == EventDefinitions.KIND_NIP90_EXTRACT_TEXT:\n for tag in event.tags():\n if tag.as_vec()[0] == \"i\":\n if tag.as_vec()[2] == \"url\":\n file_type = check_url_is_readable(tag.as_vec()[1])\n print(file_type)\n if file_type == \"pdf\":\n return \"pdf-to-text\"\n elif file_type == \"audio\" or file_type == \"video\":\n return \"speech-to-text\"\n elif file_type == \"image\":\n return \"image-to-text\"\n else:\n return \"unknown job\"\n elif tag.as_vec()[2] == \"event\":\n evt = get_event_by_id(tag.as_vec()[1], client=client, config=dvm_config)\n if evt is not None:\n if evt.kind() == 1063:\n for tg in evt.tags():\n if tg.as_vec()[0] == 'url':\n file_type = check_url_is_readable(tg.as_vec()[1])\n if file_type == \"pdf\":\n return \"pdf-to-text\"\n elif file_type == \"audio\" or file_type == \"video\":\n return \"speech-to-text\"\n else:\n return \"unknown job\"\n else:\n return \"unknown type\"\n else:\n return \"unknown job\"\n elif event.kind() == EventDefinitions.KIND_NIP90_GENERATE_IMAGE:\n has_image_tag = False\n has_text_tag = False\n for tag in event.tags():\n if tag.as_vec()[0] == \"i\":\n if tag.as_vec()[2] == \"url\":\n file_type = check_url_is_readable(tag.as_vec()[1])\n if file_type == \"image\":\n has_image_tag = True\n print(\"found image tag\")\n elif tag.as_vec()[2] == \"job\":\n evt = get_referenced_event_by_id(event_id=tag.as_vec()[1], kinds=\n [EventDefinitions.KIND_NIP90_RESULT_EXTRACT_TEXT,\n EventDefinitions.KIND_NIP90_RESULT_TRANSLATE_TEXT,\n EventDefinitions.KIND_NIP90_RESULT_SUMMARIZE_TEXT],\n client=client,\n dvm_config=dvm_config)\n if evt is not None:\n file_type = check_url_is_readable(evt.content())\n if file_type == \"image\":\n 
has_image_tag = True\n elif tag.as_vec()[2] == \"text\":\n has_text_tag = True\n\n if has_image_tag:\n return \"image-to-image\"\n elif has_text_tag and not has_image_tag:\n return \"text-to-image\"\n # TODO if a task can consist of multiple inputs add them here\n # This is not ideal. Maybe such events should have their own kind\n\n # else if kind is supported, simply return task\n else:\n\n for dvm in dvm_config.SUPPORTED_DVMS:\n if dvm.KIND == event.kind():\n return dvm.TASK\n except Exception as e:\n print(\"Get task: \" + str(e))\n\n return \"unknown type\"" }, { "identifier": "create_sql_table", "path": "nostr_dvm/utils/database_utils.py", "snippet": "def create_sql_table(db):\n try:\n import os\n if not os.path.exists(r'db'):\n os.makedirs(r'db')\n if not os.path.exists(r'outputs'):\n os.makedirs(r'outputs')\n con = sqlite3.connect(db)\n cur = con.cursor()\n cur.execute(\"\"\" CREATE TABLE IF NOT EXISTS users (\n npub text PRIMARY KEY,\n sats integer NOT NULL,\n iswhitelisted boolean,\n isblacklisted boolean,\n nip05 text,\n lud16 text,\n name text,\n lastactive integer\n ); \"\"\")\n cur.execute(\"SELECT name FROM sqlite_master\")\n con.close()\n\n except Error as e:\n print(e)" }, { "identifier": "get_or_add_user", "path": "nostr_dvm/utils/database_utils.py", "snippet": "def get_or_add_user(db, npub, client, config, update=False):\n user = get_from_sql_table(db, npub)\n if user is None:\n try:\n name, nip05, lud16 = fetch_user_metadata(npub, client)\n print(\"Adding User: \" + npub + \" (\" + npub + \")\")\n add_to_sql_table(db, npub, config.NEW_USER_BALANCE, False, False, nip05,\n lud16, name, Timestamp.now().as_secs())\n user = get_from_sql_table(db, npub)\n return user\n except Exception as e:\n print(\"Error Adding User to DB: \" + str(e))\n elif update:\n try:\n name, nip05, lud16 = fetch_user_metadata(npub, client)\n print(\"Updating User: \" + npub + \" (\" + npub + \")\")\n update_sql_table(db, user.npub, user.balance, user.iswhitelisted, user.isblacklisted, nip05,\n lud16, name, Timestamp.now().as_secs())\n user = get_from_sql_table(db, npub)\n return user\n except Exception as e:\n print(\"Error Updating User in DB: \" + str(e))\n\n return user" }, { "identifier": "update_user_balance", "path": "nostr_dvm/utils/database_utils.py", "snippet": "def update_user_balance(db, npub, additional_sats, client, config):\n user = get_from_sql_table(db, npub)\n if user is None:\n name, nip05, lud16 = fetch_user_metadata(npub, client)\n add_to_sql_table(db, npub, (int(additional_sats) + config.NEW_USER_BALANCE), False, False,\n nip05, lud16, name, Timestamp.now().as_secs())\n print(\"Adding User: \" + npub + \" (\" + npub + \")\")\n else:\n user = get_from_sql_table(db, npub)\n new_balance = int(user.balance) + int(additional_sats)\n update_sql_table(db, npub, new_balance, user.iswhitelisted, user.isblacklisted, user.nip05, user.lud16,\n user.name,\n Timestamp.now().as_secs())\n print(\"Updated user balance for: \" + str(user.name) +\n \" Zap amount: \" + str(additional_sats) + \" Sats. New balance: \" + str(new_balance) +\" Sats\")\n\n if config is not None:\n keys = Keys.from_sk_str(config.PRIVATE_KEY)\n #time.sleep(1.0)\n\n message = (\"Added \" + str(additional_sats) + \" Sats to balance. 
New balance is \" + str(new_balance) + \" Sats.\")\n\n evt = EventBuilder.new_encrypted_direct_msg(keys, PublicKey.from_hex(npub), message,\n None).to_event(keys)\n send_event(evt, client=client, dvm_config=config)" }, { "identifier": "update_sql_table", "path": "nostr_dvm/utils/database_utils.py", "snippet": "def update_sql_table(db, npub, balance, iswhitelisted, isblacklisted, nip05, lud16, name, lastactive):\n try:\n con = sqlite3.connect(db)\n cur = con.cursor()\n data = (balance, iswhitelisted, isblacklisted, nip05, lud16, name, lastactive, npub)\n\n cur.execute(\"\"\" UPDATE users\n SET sats = ? ,\n iswhitelisted = ? ,\n isblacklisted = ? ,\n nip05 = ? ,\n lud16 = ? ,\n name = ? ,\n lastactive = ?\n WHERE npub = ?\"\"\", data)\n con.commit()\n con.close()\n except Error as e:\n print(\"Error Updating DB: \" + str(e))" }, { "identifier": "input_data_file_duration", "path": "nostr_dvm/utils/mediasource_utils.py", "snippet": "def input_data_file_duration(event, dvm_config, client, start=0, end=0):\n # print(\"[\" + dvm_config.NIP89.NAME + \"] Getting Duration of the Media file..\")\n input_value = \"\"\n input_type = \"\"\n for tag in event.tags():\n if tag.as_vec()[0] == 'i':\n input_value = tag.as_vec()[1]\n input_type = tag.as_vec()[2]\n\n if input_type == \"text\":\n return len(input_value)\n\n if input_type == \"event\": # NIP94 event\n evt = get_event_by_id(input_value, client=client, config=dvm_config)\n if evt is not None:\n input_value, input_type = check_nip94_event_for_media(evt, input_value, input_type)\n if input_type == \"text\":\n # For now, ingore length of any text, just return 1.\n return len(input_value)\n\n if input_type == \"url\":\n source_type = check_source_type(input_value)\n\n filename, start, end, type = get_file_start_end_type(input_value, source_type, start, end, True)\n if type != \"audio\" and type != \"video\":\n return 1\n if filename == \"\" or filename is None:\n return 0\n try:\n file_reader = AudioReader(filename, ctx=cpu(0), mono=False)\n duration = float(file_reader.duration())\n except Exception as e:\n print(e)\n return 0\n print(\"Original Duration of the Media file: \" + str(duration))\n start_time, end_time, new_duration = (\n convert_media_length(start, end, duration))\n print(\"New Duration of the Media file: \" + str(new_duration))\n return new_duration\n\n return 1" }, { "identifier": "get_event_by_id", "path": "nostr_dvm/utils/nostr_utils.py", "snippet": "def get_event_by_id(event_id: str, client: Client, config=None) -> Event | None:\n split = event_id.split(\":\")\n if len(split) == 3:\n pk = PublicKey.from_hex(split[1])\n id_filter = Filter().author(pk).custom_tag(Alphabet.D, [split[2]])\n events = client.get_events_of([id_filter], timedelta(seconds=config.RELAY_TIMEOUT))\n else:\n if str(event_id).startswith('note'):\n event_id = EventId.from_bech32(event_id)\n elif str(event_id).startswith(\"nevent\"):\n event_id = Nip19Event.from_bech32(event_id).event_id()\n elif str(event_id).startswith('nostr:note'):\n event_id = EventId.from_nostr_uri(event_id)\n elif str(event_id).startswith(\"nostr:nevent\"):\n event_id = Nip19Event.from_nostr_uri(event_id).event_id()\n\n else:\n event_id = EventId.from_hex(event_id)\n\n id_filter = Filter().id(event_id).limit(1)\n events = client.get_events_of([id_filter], timedelta(seconds=config.RELAY_TIMEOUT))\n if len(events) > 0:\n\n return events[0]\n else:\n return None" }, { "identifier": "get_referenced_event_by_id", "path": "nostr_dvm/utils/nostr_utils.py", "snippet": "def 
get_referenced_event_by_id(event_id, client, dvm_config, kinds) -> Event | None:\n if kinds is None:\n kinds = []\n if str(event_id).startswith('note'):\n event_id = EventId.from_bech32(event_id)\n elif str(event_id).startswith(\"nevent\"):\n event_id = Nip19Event.from_bech32(event_id).event_id()\n elif str(event_id).startswith('nostr:note'):\n event_id = EventId.from_nostr_uri(event_id)\n elif str(event_id).startswith(\"nostr:nevent\"):\n event_id = Nip19Event.from_nostr_uri(event_id).event_id()\n else:\n event_id = EventId.from_hex(event_id)\n\n if len(kinds) > 0:\n job_id_filter = Filter().kinds(kinds).event(event_id).limit(1)\n else:\n job_id_filter = Filter().event(event_id).limit(1)\n\n events = client.get_events_of([job_id_filter], timedelta(seconds=dvm_config.RELAY_TIMEOUT))\n\n if len(events) > 0:\n return events[0]\n else:\n return None" }, { "identifier": "send_event", "path": "nostr_dvm/utils/nostr_utils.py", "snippet": "def send_event(event: Event, client: Client, dvm_config) -> EventId:\n try:\n relays = []\n\n for tag in event.tags():\n if tag.as_vec()[0] == 'relays':\n for index, param in enumerate(tag.as_vec()):\n if index != 0:\n relays.append(tag.as_vec()[index])\n\n for relay in relays:\n if relay not in dvm_config.RELAY_LIST:\n client.add_relay(relay)\n\n event_id = client.send_event(event)\n\n for relay in relays:\n if relay not in dvm_config.RELAY_LIST:\n client.remove_relay(relay)\n\n return event_id\n except Exception as e:\n print(e)" }, { "identifier": "check_and_decrypt_tags", "path": "nostr_dvm/utils/nostr_utils.py", "snippet": "def check_and_decrypt_tags(event, dvm_config):\n try:\n\n is_encrypted = False\n p = \"\"\n for tag in event.tags():\n if tag.as_vec()[0] == 'encrypted':\n is_encrypted = True\n elif tag.as_vec()[0] == 'p':\n p = tag.as_vec()[1]\n\n if is_encrypted:\n if p != dvm_config.PUBLIC_KEY:\n print(\"[\" + dvm_config.NIP89.NAME + \"] Task encrypted and not addressed to this DVM, \"\n \"skipping..\")\n return None\n\n elif p == dvm_config.PUBLIC_KEY:\n tags_str = nip04_decrypt(Keys.from_sk_str(dvm_config.PRIVATE_KEY).secret_key(),\n event.pubkey(), event.content())\n params = json.loads(tags_str)\n params.append(Tag.parse([\"p\", p]).as_vec())\n params.append(Tag.parse([\"encrypted\"]).as_vec())\n event_as_json = json.loads(event.as_json())\n event_as_json['tags'] = params\n event_as_json['content'] = \"\"\n event = Event.from_json(json.dumps(event_as_json))\n except Exception as e:\n print(e)\n\n return event" }, { "identifier": "build_status_reaction", "path": "nostr_dvm/utils/output_utils.py", "snippet": "def build_status_reaction(status, task, amount, content):\n alt_description = \"This is a reaction to a NIP90 DVM AI task. \"\n\n if status == \"processing\":\n alt_description = \"NIP90 DVM AI task \" + task + \" started processing. \"\n reaction = alt_description + emoji.emojize(\":thumbs_up:\")\n elif status == \"success\":\n alt_description = \"NIP90 DVM AI task \" + task + \" finished successfully. \"\n reaction = alt_description + emoji.emojize(\":call_me_hand:\")\n elif status == \"chain-scheduled\":\n alt_description = \"NIP90 DVM AI task \" + task + \" Chain Task scheduled\"\n reaction = alt_description + emoji.emojize(\":thumbs_up:\")\n elif status == \"error\":\n alt_description = \"NIP90 DVM AI task \" + task + \" had an error. 
\"\n if content is None:\n reaction = alt_description + emoji.emojize(\":thumbs_down:\")\n else:\n reaction = alt_description + emoji.emojize(\":thumbs_down:\") + \" \" + content\n\n elif status == \"payment-required\":\n alt_description = \"NIP90 DVM AI task \" + task + \" requires payment of min \" + str(\n amount) + \" Sats. \"\n reaction = alt_description + emoji.emojize(\":orange_heart:\")\n\n elif status == \"payment-rejected\":\n alt_description = \"NIP90 DVM AI task \" + task + \" payment is below required amount of \" + str(\n amount) + \" Sats. \"\n reaction = alt_description + emoji.emojize(\":thumbs_down:\")\n elif status == \"user-blocked-from-service\":\n alt_description = \"NIP90 DVM AI task \" + task + \" can't be performed. User has been blocked from Service. \"\n reaction = alt_description + emoji.emojize(\":thumbs_down:\")\n else:\n reaction = emoji.emojize(\":thumbs_down:\")\n\n return alt_description, reaction" }, { "identifier": "check_bolt11_ln_bits_is_paid", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def check_bolt11_ln_bits_is_paid(payment_hash: str, config):\n url = config.LNBITS_URL + \"/api/v1/payments/\" + payment_hash\n headers = {'X-API-Key': config.LNBITS_INVOICE_KEY, 'Content-Type': 'application/json', 'charset': 'UTF-8'}\n try:\n res = requests.get(url, headers=headers, proxies=proxies)\n obj = json.loads(res.text)\n if obj.get(\"paid\"):\n return obj[\"paid\"]\n else:\n return False\n except Exception as e:\n return None" }, { "identifier": "create_bolt11_ln_bits", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def create_bolt11_ln_bits(sats: int, config) -> (str, str):\n if config.LNBITS_URL == \"\":\n return None, None\n url = config.LNBITS_URL + \"/api/v1/payments\"\n data = {'out': False, 'amount': sats, 'memo': \"Nostr-DVM \" + config.NIP89.NAME}\n headers = {'X-API-Key': config.LNBITS_INVOICE_KEY, 'Content-Type': 'application/json', 'charset': 'UTF-8'}\n try:\n res = requests.post(url, json=data, headers=headers)\n obj = json.loads(res.text)\n if obj.get(\"payment_request\") and obj.get(\"payment_hash\"):\n return obj[\"payment_request\"], obj[\"payment_hash\"] #\n else:\n print(\"LNBITS: \" + res.text)\n return None, None\n except Exception as e:\n print(\"LNBITS: \" + str(e))\n return None, None" }, { "identifier": "parse_zap_event_tags", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def parse_zap_event_tags(zap_event, keys, name, client, config):\n zapped_event = None\n invoice_amount = 0\n anon = False\n message = \"\"\n sender = zap_event.pubkey()\n for tag in zap_event.tags():\n if tag.as_vec()[0] == 'bolt11':\n invoice_amount = parse_amount_from_bolt11_invoice(tag.as_vec()[1])\n elif tag.as_vec()[0] == 'e':\n zapped_event = get_event_by_id(tag.as_vec()[1], client=client, config=config)\n zapped_event = check_and_decrypt_own_tags(zapped_event, config)\n elif tag.as_vec()[0] == 'p':\n p_tag = tag.as_vec()[1]\n elif tag.as_vec()[0] == 'description':\n zap_request_event = Event.from_json(tag.as_vec()[1])\n sender = check_for_zapplepay(zap_request_event.pubkey().to_hex(),\n zap_request_event.content())\n for z_tag in zap_request_event.tags():\n if z_tag.as_vec()[0] == 'anon':\n if len(z_tag.as_vec()) > 1:\n # print(\"[\" + name + \"] Private Zap received.\")\n decrypted_content = decrypt_private_zap_message(z_tag.as_vec()[1],\n keys.secret_key(),\n zap_request_event.pubkey())\n decrypted_private_event = Event.from_json(decrypted_content)\n if decrypted_private_event.kind() == 9733:\n sender = 
decrypted_private_event.pubkey().to_hex()\n message = decrypted_private_event.content()\n # if message != \"\":\n # print(\"Zap Message: \" + message)\n else:\n anon = True\n print(\n \"[\" + name + \"] Anonymous Zap received. Unlucky, I don't know from whom, and never will\")\n\n return invoice_amount, zapped_event, sender, message, anon" }, { "identifier": "parse_amount_from_bolt11_invoice", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def parse_amount_from_bolt11_invoice(bolt11_invoice: str) -> int:\n def get_index_of_first_letter(ip):\n index = 0\n for c in ip:\n if c.isalpha():\n return index\n else:\n index = index + 1\n return len(ip)\n\n remaining_invoice = bolt11_invoice[4:]\n index = get_index_of_first_letter(remaining_invoice)\n identifier = remaining_invoice[index]\n number_string = remaining_invoice[:index]\n number = float(number_string)\n if identifier == 'm':\n number = number * 100000000 * 0.001\n elif identifier == 'u':\n number = number * 100000000 * 0.000001\n elif identifier == 'n':\n number = number * 100000000 * 0.000000001\n elif identifier == 'p':\n number = number * 100000000 * 0.000000000001\n\n return int(number)" }, { "identifier": "zaprequest", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def zaprequest(lud16: str, amount: int, content, zapped_event, zapped_user, keys, relay_list, zaptype=\"public\"):\n if lud16.startswith(\"LNURL\") or lud16.startswith(\"lnurl\"):\n url = lnurl.decode(lud16)\n elif '@' in lud16: # LNaddress\n url = 'https://' + str(lud16).split('@')[1] + '/.well-known/lnurlp/' + str(lud16).split('@')[0]\n else: # No lud16 set or format invalid\n return None\n try:\n response = requests.get(url)\n ob = json.loads(response.content)\n callback = ob[\"callback\"]\n encoded_lnurl = lnurl.encode(url)\n amount_tag = Tag.parse(['amount', str(amount * 1000)])\n relays_tag = Tag.parse(['relays', str(relay_list)])\n lnurl_tag = Tag.parse(['lnurl', encoded_lnurl])\n if zapped_event is not None:\n p_tag = Tag.parse(['p', zapped_event.pubkey().to_hex()])\n e_tag = Tag.parse(['e', zapped_event.id().to_hex()])\n tags = [amount_tag, relays_tag, p_tag, e_tag, lnurl_tag]\n else:\n p_tag = Tag.parse(['p', zapped_user.to_hex()])\n tags = [amount_tag, relays_tag, p_tag, lnurl_tag]\n\n\n if zaptype == \"private\":\n key_str = keys.secret_key().to_hex() + zapped_event.id().to_hex() + str(zapped_event.created_at().as_secs())\n encryption_key = sha256(key_str.encode('utf-8')).hexdigest()\n\n zap_request = EventBuilder(9733, content,\n [p_tag, e_tag]).to_event(keys).as_json()\n keys = Keys.from_sk_str(encryption_key)\n encrypted_content = enrypt_private_zap_message(zap_request, keys.secret_key(), zapped_event.pubkey())\n anon_tag = Tag.parse(['anon', encrypted_content])\n tags.append(anon_tag)\n content = \"\"\n\n zap_request = EventBuilder(9734, content,\n tags).to_event(keys).as_json()\n\n response = requests.get(callback + \"?amount=\" + str(int(amount) * 1000) + \"&nostr=\" + urllib.parse.quote_plus(\n zap_request) + \"&lnurl=\" + encoded_lnurl)\n ob = json.loads(response.content)\n return ob[\"pr\"]\n\n except Exception as e:\n print(\"ZAP REQUEST: \" + e)\n return None" }, { "identifier": "pay_bolt11_ln_bits", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def pay_bolt11_ln_bits(bolt11: str, config):\n url = config.LNBITS_URL + \"/api/v1/payments\"\n data = {'out': True, 'bolt11': bolt11}\n headers = {'X-API-Key': config.LNBITS_ADMIN_KEY, 'Content-Type': 'application/json', 'charset': 'UTF-8'}\n try:\n res = requests.post(url, json=data, 
headers=headers)\n obj = json.loads(res.text)\n if obj.get(\"payment_hash\"):\n return obj[\"payment_hash\"]\n else:\n return \"Error\"\n except Exception as e:\n print(\"LNBITS: \" + str(e))\n return \"Error\"" }, { "identifier": "create_bolt11_lud16", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def create_bolt11_lud16(lud16, amount):\n if lud16.startswith(\"LNURL\") or lud16.startswith(\"lnurl\"):\n url = lnurl.decode(lud16)\n elif '@' in lud16: # LNaddress\n url = 'https://' + str(lud16).split('@')[1] + '/.well-known/lnurlp/' + str(lud16).split('@')[0]\n else: # No lud16 set or format invalid\n return None\n try:\n print(url)\n response = requests.get(url)\n ob = json.loads(response.content)\n callback = ob[\"callback\"]\n response = requests.get(callback + \"?amount=\" + str(int(amount) * 1000))\n ob = json.loads(response.content)\n return ob[\"pr\"]\n except Exception as e:\n print(\"LUD16: \" + e)\n return None" }, { "identifier": "redeem_cashu", "path": "nostr_dvm/utils/cashu_utils.py", "snippet": "def redeem_cashu(cashu, config, client, required_amount=0, update_self=False) -> (bool, str, int, int):\n proofs, mint, total_amount, message = parse_cashu(cashu)\n if message is not None:\n return False, message, 0, 0\n\n estimated_fees = max(int(total_amount * 0.02), 3)\n estimated_redeem_invoice_amount = total_amount - estimated_fees\n\n # Not sure if this the best way to go, we first create an invoice that we send to the mint, we catch the fees\n # for that invoice, and create another invoice with the amount without fees to melt.\n if config.LNBITS_INVOICE_KEY != \"\":\n invoice, paymenthash = create_bolt11_ln_bits(estimated_redeem_invoice_amount, config)\n else:\n\n user = get_or_add_user(db=config.DB, npub=config.PUBLIC_KEY,\n client=client, config=config, update=update_self)\n invoice = create_bolt11_lud16(user.lud16, estimated_redeem_invoice_amount)\n print(invoice)\n if invoice is None:\n return False, \"couldn't create invoice\", 0, 0\n\n url = mint + \"/checkfees\" # Melt cashu tokens at Mint\n json_object = {\"pr\": invoice}\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n request_body = json.dumps(json_object).encode('utf-8')\n request = requests.post(url, data=request_body, headers=headers)\n tree = json.loads(request.text)\n fees = tree[\"fee\"]\n print(\"Fees on this mint are \" + str(fees) + \" Sats\")\n redeem_invoice_amount = total_amount -fees\n if redeem_invoice_amount < required_amount:\n err = (\"Token value (Payment: \" + str(total_amount) + \" Sats. Fees: \" +\n str(fees) + \" Sats) below required amount of \" + str(required_amount)\n + \" Sats. 
Cashu token has not been claimed.\")\n print(\"[\" + config.NIP89.NAME + \"] \" + err)\n return False, err, 0, 0\n\n if config.LNBITS_INVOICE_KEY != \"\":\n invoice, paymenthash = create_bolt11_ln_bits(redeem_invoice_amount, config)\n else:\n\n user = get_or_add_user(db=config.DB, npub=config.PUBLIC_KEY,\n client=client, config=config, update=update_self)\n invoice = create_bolt11_lud16(user.lud16, redeem_invoice_amount)\n print(invoice)\n\n try:\n url = mint + \"/melt\" # Melt cashu tokens at Mint\n json_object = {\"proofs\": proofs, \"pr\": invoice}\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n request_body = json.dumps(json_object).encode('utf-8')\n request = requests.post(url, data=request_body, headers=headers)\n tree = json.loads(request.text)\n print(request.text)\n is_paid = tree[\"paid\"] if tree.get(\"paid\") else False\n print(is_paid)\n if is_paid:\n print(\"cashu token redeemed\")\n return True, \"success\", redeem_invoice_amount, fees\n else:\n msg = tree.get(\"detail\").split('.')[0].strip() if tree.get(\"detail\") else None\n print(msg)\n return False, msg, redeem_invoice_amount, fees\n except Exception as e:\n print(e)\n\n return False, \"\", redeem_invoice_amount, fees" } ]
import json
import os
import subprocess
import time
from datetime import timedelta
from sys import platform

from nostr_sdk import PublicKey, Keys, Client, Tag, Event, EventBuilder, Filter, HandleNotification, Timestamp, \
    init_logger, LogLevel, Options, nip04_encrypt, ClientSigner

from nostr_dvm.utils.definitions import EventDefinitions, RequiredJobToWatch, JobToWatch
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.admin_utils import admin_make_database_updates, AdminConfig
from nostr_dvm.utils.backend_utils import get_amount_per_task, check_task_is_supported, get_task
from nostr_dvm.utils.database_utils import create_sql_table, get_or_add_user, update_user_balance, update_sql_table
from nostr_dvm.utils.mediasource_utils import input_data_file_duration
from nostr_dvm.utils.nostr_utils import get_event_by_id, get_referenced_event_by_id, send_event, check_and_decrypt_tags
from nostr_dvm.utils.output_utils import build_status_reaction
from nostr_dvm.utils.zap_utils import check_bolt11_ln_bits_is_paid, create_bolt11_ln_bits, parse_zap_event_tags, \
    parse_amount_from_bolt11_invoice, zaprequest, pay_bolt11_ln_bits, create_bolt11_lud16
from nostr_dvm.utils.cashu_utils import redeem_cashu
12,633
except Exception as e: print(e) bolt11 = None elif dvm_config.LN_ADDRESS != "": try: bolt11, payment_hash = create_bolt11_lud16(dvm_config.LN_ADDRESS, amount) except Exception as e: print(e) bolt11 = None if not any(x.event == original_event for x in self.job_list): self.job_list.append( JobToWatch(event=original_event, timestamp=original_event.created_at().as_secs(), amount=amount, is_paid=is_paid, status=status, result="", is_processed=False, bolt11=bolt11, payment_hash=payment_hash, expires=expires)) # print(str(self.job_list)) if (status == "payment-required" or status == "payment-rejected" or ( status == "processing" and not is_paid) or (status == "success" and not is_paid)): if dvm_config.LNBITS_INVOICE_KEY != "": amount_tag = Tag.parse(["amount", str(amount * 1000), bolt11]) else: amount_tag = Tag.parse(["amount", str(amount * 1000)]) # to millisats reply_tags.append(amount_tag) if encrypted: content_tag = Tag.parse(["content", reaction]) reply_tags.append(content_tag) str_tags = [] for element in reply_tags: str_tags.append(element.as_vec()) content = json.dumps(str_tags) content = nip04_encrypt(self.keys.secret_key(), PublicKey.from_hex(original_event.pubkey().to_hex()), content) reply_tags = encryption_tags else: content = reaction keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY) reaction_event = EventBuilder(EventDefinitions.KIND_FEEDBACK, str(content), reply_tags).to_event(keys) send_event(reaction_event, client=self.client, dvm_config=self.dvm_config) print("[" + self.dvm_config.NIP89.NAME + "]" + ": Sent Kind " + str( EventDefinitions.KIND_FEEDBACK) + " Reaction: " + status + " " + reaction_event.as_json()) return reaction_event.as_json() def do_work(job_event, amount): if ((EventDefinitions.KIND_NIP90_EXTRACT_TEXT <= job_event.kind() <= EventDefinitions.KIND_NIP90_GENERIC) or job_event.kind() == EventDefinitions.KIND_DM): task = get_task(job_event, client=self.client, dvm_config=self.dvm_config) for dvm in self.dvm_config.SUPPORTED_DVMS: result = "" try: if task == dvm.TASK: request_form = dvm.create_request_from_nostr_event(job_event, self.client, self.dvm_config) if dvm_config.USE_OWN_VENV: python_location = "/bin/python" if platform == "win32": python_location = "/Scripts/python" python_bin = (r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0] + python_location) retcode = subprocess.call([python_bin, dvm_config.SCRIPT, '--request', json.dumps(request_form), '--identifier', dvm_config.IDENTIFIER, '--output', 'output.txt']) print("Finished processing, loading data..") with open(os.path.abspath('output.txt')) as f: resultall = f.readlines() for line in resultall: if line != '\n': result += line os.remove(os.path.abspath('output.txt')) assert not result.startswith("Error:") print(result) else: # Some components might have issues with running code in otuside venv. # We install locally in these cases for now result = dvm.process(request_form) try: post_processed = dvm.post_process(str(result), job_event) send_nostr_reply_event(post_processed, job_event.as_json()) except Exception as e: send_job_status_reaction(job_event, "error", content=str(e), dvm_config=self.dvm_config) except Exception as e: # we could send the exception here to the user, but maybe that's not a good idea after all. 
send_job_status_reaction(job_event, "error", content=result, dvm_config=self.dvm_config) # Zapping back the user on error if amount > 0 and self.dvm_config.LNBITS_ADMIN_KEY != "": user = get_or_add_user(self.dvm_config.DB, job_event.pubkey().to_hex(), client=self.client, config=self.dvm_config) print(user.lud16 + " " + str(amount)) bolt11 = zaprequest(user.lud16, amount, "Couldn't finish job, returning sats", job_event, user.npub, self.keys, self.dvm_config.RELAY_LIST, zaptype="private") if bolt11 is None: print("Receiver has no Lightning address, can't zap back.") return try: payment_hash = pay_bolt11_ln_bits(bolt11, self.dvm_config) except Exception as e: print(e) return self.client.handle_notifications(NotificationHandler()) while True: for job in self.job_list: if job.bolt11 != "" and job.payment_hash != "" and not job.is_paid:
use_logger = False if use_logger: init_logger(LogLevel.DEBUG) class DVM: dvm_config: DVMConfig admin_config: AdminConfig keys: Keys client: Client job_list: list jobs_on_hold_list: list def __init__(self, dvm_config, admin_config=None): self.dvm_config = dvm_config self.admin_config = admin_config self.keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY) wait_for_send = True skip_disconnected_relays = True opts = (Options().wait_for_send(wait_for_send).send_timeout(timedelta(seconds=self.dvm_config.RELAY_TIMEOUT)) .skip_disconnected_relays(skip_disconnected_relays)) signer = ClientSigner.keys(self.keys) self.client = Client.with_opts(signer,opts) self.job_list = [] self.jobs_on_hold_list = [] pk = self.keys.public_key() print("Nostr DVM public key: " + str(pk.to_bech32()) + " Hex: " + str(pk.to_hex()) + " Supported DVM tasks: " + ', '.join(p.NAME + ":" + p.TASK for p in self.dvm_config.SUPPORTED_DVMS) + "\n") for relay in self.dvm_config.RELAY_LIST: self.client.add_relay(relay) self.client.connect() zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_ZAP]).since(Timestamp.now()) kinds = [EventDefinitions.KIND_NIP90_GENERIC] for dvm in self.dvm_config.SUPPORTED_DVMS: if dvm.KIND not in kinds: kinds.append(dvm.KIND) dvm_filter = (Filter().kinds(kinds).since(Timestamp.now())) self.client.subscribe([dvm_filter, zap_filter]) create_sql_table(self.dvm_config.DB) admin_make_database_updates(adminconfig=self.admin_config, dvmconfig=self.dvm_config, client=self.client) class NotificationHandler(HandleNotification): client = self.client dvm_config = self.dvm_config keys = self.keys def handle(self, relay_url, nostr_event): if EventDefinitions.KIND_NIP90_EXTRACT_TEXT <= nostr_event.kind() <= EventDefinitions.KIND_NIP90_GENERIC: handle_nip90_job_event(nostr_event) elif nostr_event.kind() == EventDefinitions.KIND_ZAP: handle_zap(nostr_event) def handle_msg(self, relay_url, msg): return def handle_nip90_job_event(nip90_event): nip90_event = check_and_decrypt_tags(nip90_event, self.dvm_config) if nip90_event is None: return user = get_or_add_user(self.dvm_config.DB, nip90_event.pubkey().to_hex(), client=self.client, config=self.dvm_config) cashu = "" p_tag_str = "" for tag in nip90_event.tags(): if tag.as_vec()[0] == "cashu": cashu = tag.as_vec()[1] elif tag.as_vec()[0] == "p": p_tag_str = tag.as_vec()[1] task_supported, task = check_task_is_supported(nip90_event, client=self.client, config=self.dvm_config) if user.isblacklisted: send_job_status_reaction(nip90_event, "error", client=self.client, dvm_config=self.dvm_config) print("[" + self.dvm_config.NIP89.NAME + "] Request by blacklisted user, skipped") elif task_supported: print("[" + self.dvm_config.NIP89.NAME + "] Received new Request: " + task + " from " + user.name) duration = input_data_file_duration(nip90_event, dvm_config=self.dvm_config, client=self.client) amount = get_amount_per_task(task, self.dvm_config, duration) if amount is None: return task_is_free = False for dvm in self.dvm_config.SUPPORTED_DVMS: if dvm.TASK == task and dvm.FIX_COST == 0 and dvm.PER_UNIT_COST == 0: task_is_free = True cashu_redeemed = False if cashu != "": print(cashu) cashu_redeemed, cashu_message, redeem_amount, fees = redeem_cashu(cashu, self.dvm_config, self.client, int(amount)) print(cashu_message) if cashu_message != "success": send_job_status_reaction(nip90_event, "error", False, amount, self.client, cashu_message, self.dvm_config) return # if user is whitelisted or task is free, just do the job if (user.iswhitelisted or task_is_free or cashu_redeemed) and 
(p_tag_str == "" or p_tag_str == self.dvm_config.PUBLIC_KEY): print( "[" + self.dvm_config.NIP89.NAME + "] Free task or Whitelisted for task " + task + ". Starting processing..") send_job_status_reaction(nip90_event, "processing", True, 0, client=self.client, dvm_config=self.dvm_config) # when we reimburse users on error make sure to not send anything if it was free if user.iswhitelisted or task_is_free: amount = 0 do_work(nip90_event, amount) # if task is directed to us via p tag and user has balance, do the job and update balance elif p_tag_str == self.dvm_config.PUBLIC_KEY and user.balance >= int(amount): balance = max(user.balance - int(amount), 0) update_sql_table(db=self.dvm_config.DB, npub=user.npub, balance=balance, iswhitelisted=user.iswhitelisted, isblacklisted=user.isblacklisted, nip05=user.nip05, lud16=user.lud16, name=user.name, lastactive=Timestamp.now().as_secs()) print( "[" + self.dvm_config.NIP89.NAME + "] Using user's balance for task: " + task + ". Starting processing.. New balance is: " + str(balance)) send_job_status_reaction(nip90_event, "processing", True, 0, client=self.client, dvm_config=self.dvm_config) do_work(nip90_event, amount) # else send a payment required event to user elif p_tag_str == "" or p_tag_str == self.dvm_config.PUBLIC_KEY: bid = 0 for tag in nip90_event.tags(): if tag.as_vec()[0] == 'bid': bid = int(tag.as_vec()[1]) print( "[" + self.dvm_config.NIP89.NAME + "] Payment required: New Nostr " + task + " Job event: " + nip90_event.as_json()) if bid > 0: bid_offer = int(bid / 1000) if bid_offer >= int(amount): send_job_status_reaction(nip90_event, "payment-required", False, int(amount), # bid_offer client=self.client, dvm_config=self.dvm_config) else: # If there is no bid, just request server rate from user print( "[" + self.dvm_config.NIP89.NAME + "] Requesting payment for Event: " + nip90_event.id().to_hex()) send_job_status_reaction(nip90_event, "payment-required", False, int(amount), client=self.client, dvm_config=self.dvm_config) else: print("[" + self.dvm_config.NIP89.NAME + "] Job addressed to someone else, skipping..") # else: # print("[" + self.dvm_config.NIP89.NAME + "] Task " + task + " not supported on this DVM, skipping..") def handle_zap(zap_event): try: invoice_amount, zapped_event, sender, message, anon = parse_zap_event_tags(zap_event, self.keys, self.dvm_config.NIP89.NAME, self.client, self.dvm_config) user = get_or_add_user(db=self.dvm_config.DB, npub=sender, client=self.client, config=self.dvm_config) if zapped_event is not None: if zapped_event.kind() == EventDefinitions.KIND_FEEDBACK: amount = 0 job_event = None p_tag_str = "" for tag in zapped_event.tags(): if tag.as_vec()[0] == 'amount': amount = int(float(tag.as_vec()[1]) / 1000) elif tag.as_vec()[0] == 'e': job_event = get_event_by_id(tag.as_vec()[1], client=self.client, config=self.dvm_config) if job_event is not None: job_event = check_and_decrypt_tags(job_event, self.dvm_config) if job_event is None: return else: return # if a reaction by us got zapped task_supported, task = check_task_is_supported(job_event, client=self.client, config=self.dvm_config) if job_event is not None and task_supported: print("Zap received for NIP90 task: " + str(invoice_amount) + " Sats from " + str( user.name)) if amount <= invoice_amount: print("[" + self.dvm_config.NIP89.NAME + "] Payment-request fulfilled...") send_job_status_reaction(job_event, "processing", client=self.client, dvm_config=self.dvm_config) indices = [i for i, x in enumerate(self.job_list) if x.event == job_event] index = -1 
if len(indices) > 0: index = indices[0] if index > -1: if self.job_list[index].is_processed: # If payment-required appears a processing self.job_list[index].is_paid = True check_and_return_event(self.job_list[index].result, job_event) elif not (self.job_list[index]).is_processed: # If payment-required appears before processing self.job_list.pop(index) print("Starting work...") do_work(job_event, invoice_amount) else: print("Job not in List, but starting work...") do_work(job_event, invoice_amount) else: send_job_status_reaction(job_event, "payment-rejected", False, invoice_amount, client=self.client, dvm_config=self.dvm_config) print("[" + self.dvm_config.NIP89.NAME + "] Invoice was not paid sufficiently") elif zapped_event.kind() in EventDefinitions.ANY_RESULT: print("[" + self.dvm_config.NIP89.NAME + "] " "Someone zapped the result of an exisiting Task. Nice") elif not anon: print("[" + self.dvm_config.NIP89.NAME + "] Note Zap received for DVM balance: " + str(invoice_amount) + " Sats from " + str(user.name)) update_user_balance(self.dvm_config.DB, sender, invoice_amount, client=self.client, config=self.dvm_config) # a regular note elif not anon: print("[" + self.dvm_config.NIP89.NAME + "] Profile Zap received for DVM balance: " + str(invoice_amount) + " Sats from " + str(user.name)) update_user_balance(self.dvm_config.DB, sender, invoice_amount, client=self.client, config=self.dvm_config) except Exception as e: print("[" + self.dvm_config.NIP89.NAME + "] Error during content decryption: " + str(e)) def check_event_has_not_unfinished_job_input(nevent, append, client, dvmconfig): task_supported, task = check_task_is_supported(nevent, client, config=dvmconfig) if not task_supported: return False for tag in nevent.tags(): if tag.as_vec()[0] == 'i': if len(tag.as_vec()) < 3: print("Job Event missing/malformed i tag, skipping..") return False else: input = tag.as_vec()[1] input_type = tag.as_vec()[2] if input_type == "job": evt = get_referenced_event_by_id(event_id=input, client=client, kinds=EventDefinitions.ANY_RESULT, dvm_config=dvmconfig) if evt is None: if append: job_ = RequiredJobToWatch(event=nevent, timestamp=Timestamp.now().as_secs()) self.jobs_on_hold_list.append(job_) send_job_status_reaction(nevent, "chain-scheduled", True, 0, client=client, dvm_config=dvmconfig) return False else: return True def check_and_return_event(data, original_event: Event): amount = 0 for x in self.job_list: if x.event == original_event: is_paid = x.is_paid amount = x.amount x.result = data x.is_processed = True if self.dvm_config.SHOW_RESULT_BEFORE_PAYMENT and not is_paid: send_nostr_reply_event(data, original_event.as_json()) send_job_status_reaction(original_event, "success", amount, dvm_config=self.dvm_config, ) # or payment-required, or both? elif not self.dvm_config.SHOW_RESULT_BEFORE_PAYMENT and not is_paid: send_job_status_reaction(original_event, "success", amount, dvm_config=self.dvm_config, ) # or payment-required, or both? 
if self.dvm_config.SHOW_RESULT_BEFORE_PAYMENT and is_paid: self.job_list.remove(x) elif not self.dvm_config.SHOW_RESULT_BEFORE_PAYMENT and is_paid: self.job_list.remove(x) send_nostr_reply_event(data, original_event.as_json()) break task = get_task(original_event, self.client, self.dvm_config) for dvm in self.dvm_config.SUPPORTED_DVMS: if task == dvm.TASK: try: post_processed = dvm.post_process(data, original_event) send_nostr_reply_event(post_processed, original_event.as_json()) except Exception as e: # Zapping back by error in post-processing is a risk for the DVM because work has been done, # but maybe something with parsing/uploading failed. Try to avoid errors here as good as possible send_job_status_reaction(original_event, "error", content="Error in Post-processing: " + str(e), dvm_config=self.dvm_config, ) if amount > 0 and self.dvm_config.LNBITS_ADMIN_KEY != "": user = get_or_add_user(self.dvm_config.DB, original_event.pubkey().to_hex(), client=self.client, config=self.dvm_config) print(user.lud16 + " " + str(amount)) bolt11 = zaprequest(user.lud16, amount, "Couldn't finish job, returning sats", original_event, self.keys, self.dvm_config, zaptype="private") if bolt11 is None: print("Receiver has no Lightning address, can't zap back.") return try: payment_hash = pay_bolt11_ln_bits(bolt11, self.dvm_config) except Exception as e: print(e) def send_nostr_reply_event(content, original_event_as_str): original_event = Event.from_json(original_event_as_str) request_tag = Tag.parse(["request", original_event_as_str]) e_tag = Tag.parse(["e", original_event.id().to_hex()]) p_tag = Tag.parse(["p", original_event.pubkey().to_hex()]) alt_tag = Tag.parse(["alt", "This is the result of a NIP90 DVM AI task with kind " + str( original_event.kind()) + ". The task was: " + original_event.content()]) status_tag = Tag.parse(["status", "success"]) reply_tags = [request_tag, e_tag, p_tag, alt_tag, status_tag] encrypted = False for tag in original_event.tags(): if tag.as_vec()[0] == "encrypted": encrypted = True encrypted_tag = Tag.parse(["encrypted"]) reply_tags.append(encrypted_tag) for tag in original_event.tags(): if tag.as_vec()[0] == "i": i_tag = tag if not encrypted: reply_tags.append(i_tag) if encrypted: print(content) content = nip04_encrypt(self.keys.secret_key(), PublicKey.from_hex(original_event.pubkey().to_hex()), content) reply_event = EventBuilder(original_event.kind() + 1000, str(content), reply_tags).to_event(self.keys) send_event(reply_event, client=self.client, dvm_config=self.dvm_config) print("[" + self.dvm_config.NIP89.NAME + "] " + str( original_event.kind() + 1000) + " Job Response event sent: " + reply_event.as_json()) def send_job_status_reaction(original_event, status, is_paid=True, amount=0, client=None, content=None, dvm_config=None): task = get_task(original_event, client=client, dvm_config=dvm_config) alt_description, reaction = build_status_reaction(status, task, amount, content) e_tag = Tag.parse(["e", original_event.id().to_hex()]) p_tag = Tag.parse(["p", original_event.pubkey().to_hex()]) alt_tag = Tag.parse(["alt", alt_description]) status_tag = Tag.parse(["status", status]) reply_tags = [e_tag, alt_tag, status_tag] encryption_tags = [] encrypted = False for tag in original_event.tags(): if tag.as_vec()[0] == "encrypted": encrypted = True encrypted_tag = Tag.parse(["encrypted"]) encryption_tags.append(encrypted_tag) if encrypted: encryption_tags.append(p_tag) else: reply_tags.append(p_tag) if status == "success" or status == "error": # for x in self.job_list: if x.event 
== original_event: is_paid = x.is_paid amount = x.amount break bolt11 = "" payment_hash = "" expires = original_event.created_at().as_secs() + (60 * 60 * 24) if status == "payment-required" or (status == "processing" and not is_paid): if dvm_config.LNBITS_INVOICE_KEY != "": try: bolt11, payment_hash = create_bolt11_ln_bits(amount,dvm_config) except Exception as e: print(e) try: bolt11, payment_hash = create_bolt11_lud16(dvm_config.LN_ADDRESS, amount) except Exception as e: print(e) bolt11 = None elif dvm_config.LN_ADDRESS != "": try: bolt11, payment_hash = create_bolt11_lud16(dvm_config.LN_ADDRESS, amount) except Exception as e: print(e) bolt11 = None if not any(x.event == original_event for x in self.job_list): self.job_list.append( JobToWatch(event=original_event, timestamp=original_event.created_at().as_secs(), amount=amount, is_paid=is_paid, status=status, result="", is_processed=False, bolt11=bolt11, payment_hash=payment_hash, expires=expires)) # print(str(self.job_list)) if (status == "payment-required" or status == "payment-rejected" or ( status == "processing" and not is_paid) or (status == "success" and not is_paid)): if dvm_config.LNBITS_INVOICE_KEY != "": amount_tag = Tag.parse(["amount", str(amount * 1000), bolt11]) else: amount_tag = Tag.parse(["amount", str(amount * 1000)]) # to millisats reply_tags.append(amount_tag) if encrypted: content_tag = Tag.parse(["content", reaction]) reply_tags.append(content_tag) str_tags = [] for element in reply_tags: str_tags.append(element.as_vec()) content = json.dumps(str_tags) content = nip04_encrypt(self.keys.secret_key(), PublicKey.from_hex(original_event.pubkey().to_hex()), content) reply_tags = encryption_tags else: content = reaction keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY) reaction_event = EventBuilder(EventDefinitions.KIND_FEEDBACK, str(content), reply_tags).to_event(keys) send_event(reaction_event, client=self.client, dvm_config=self.dvm_config) print("[" + self.dvm_config.NIP89.NAME + "]" + ": Sent Kind " + str( EventDefinitions.KIND_FEEDBACK) + " Reaction: " + status + " " + reaction_event.as_json()) return reaction_event.as_json() def do_work(job_event, amount): if ((EventDefinitions.KIND_NIP90_EXTRACT_TEXT <= job_event.kind() <= EventDefinitions.KIND_NIP90_GENERIC) or job_event.kind() == EventDefinitions.KIND_DM): task = get_task(job_event, client=self.client, dvm_config=self.dvm_config) for dvm in self.dvm_config.SUPPORTED_DVMS: result = "" try: if task == dvm.TASK: request_form = dvm.create_request_from_nostr_event(job_event, self.client, self.dvm_config) if dvm_config.USE_OWN_VENV: python_location = "/bin/python" if platform == "win32": python_location = "/Scripts/python" python_bin = (r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0] + python_location) retcode = subprocess.call([python_bin, dvm_config.SCRIPT, '--request', json.dumps(request_form), '--identifier', dvm_config.IDENTIFIER, '--output', 'output.txt']) print("Finished processing, loading data..") with open(os.path.abspath('output.txt')) as f: resultall = f.readlines() for line in resultall: if line != '\n': result += line os.remove(os.path.abspath('output.txt')) assert not result.startswith("Error:") print(result) else: # Some components might have issues with running code in otuside venv. 
# We install locally in these cases for now result = dvm.process(request_form) try: post_processed = dvm.post_process(str(result), job_event) send_nostr_reply_event(post_processed, job_event.as_json()) except Exception as e: send_job_status_reaction(job_event, "error", content=str(e), dvm_config=self.dvm_config) except Exception as e: # we could send the exception here to the user, but maybe that's not a good idea after all. send_job_status_reaction(job_event, "error", content=result, dvm_config=self.dvm_config) # Zapping back the user on error if amount > 0 and self.dvm_config.LNBITS_ADMIN_KEY != "": user = get_or_add_user(self.dvm_config.DB, job_event.pubkey().to_hex(), client=self.client, config=self.dvm_config) print(user.lud16 + " " + str(amount)) bolt11 = zaprequest(user.lud16, amount, "Couldn't finish job, returning sats", job_event, user.npub, self.keys, self.dvm_config.RELAY_LIST, zaptype="private") if bolt11 is None: print("Receiver has no Lightning address, can't zap back.") return try: payment_hash = pay_bolt11_ln_bits(bolt11, self.dvm_config) except Exception as e: print(e) return self.client.handle_notifications(NotificationHandler()) while True: for job in self.job_list: if job.bolt11 != "" and job.payment_hash != "" and not job.is_paid:
ispaid = check_bolt11_ln_bits_is_paid(job.payment_hash, self.dvm_config)
19
2023-11-17 18:32:56+00:00
16k
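The record that ends here pairs a block of code context with the single line a model is expected to produce next (the check_bolt11_ln_bits_is_paid(...) call shown a few lines above). A minimal sketch of how such a record could be scored with whitespace-normalized exact match follows; the file name records.jsonl and the field names cropped_code / next_line are assumptions introduced for illustration only, not something this dump specifies.

import json


def normalize(line: str) -> str:
    # Collapse runs of whitespace so formatting-only differences don't count as mismatches.
    return " ".join(line.split())


def next_line_matches(predicted: str, gold: str) -> bool:
    # Exact match after normalization; stricter or fuzzier metrics could be swapped in.
    return normalize(predicted) == normalize(gold)


if __name__ == "__main__":
    # Hypothetical storage as JSON Lines, one record per line, with assumed field names.
    with open("records.jsonl") as f:
        for raw in f:
            record = json.loads(raw)
            context = record["cropped_code"]   # code shown to the model (assumed field name)
            gold = record["next_line"]         # e.g. the check_bolt11_ln_bits_is_paid(...) line above
            prediction = gold                  # placeholder: a real model's output would go here
            print(next_line_matches(prediction, gold))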
IBM/oper8
oper8/watch_manager/python_watch_manager/filters/filters.py
[ { "identifier": "KubeEventType", "path": "oper8/deploy_manager/kube_event.py", "snippet": "class KubeEventType(Enum):\n \"\"\"Enum for all possible kubernetes event types\"\"\"\n\n DELETED = \"DELETED\"\n MODIFIED = \"MODIFIED\"\n ADDED = \"ADDED\"" }, { "identifier": "ManagedObject", "path": "oper8/managed_object.py", "snippet": "class ManagedObject: # pylint: disable=too-many-instance-attributes\n \"\"\"Basic struct to represent a managed kubernetes object\"\"\"\n\n def __init__(self, definition):\n self.kind = definition.get(\"kind\")\n self.metadata = definition.get(\"metadata\", {})\n self.name = self.metadata.get(\"name\")\n self.namespace = self.metadata.get(\"namespace\")\n self.uid = self.metadata.get(\"uid\", uuid.uuid4())\n self.resource_version = self.metadata.get(\"resourceVersion\")\n self.api_version = definition.get(\"apiVersion\")\n self.definition = definition\n\n # If resource is not list then check name\n if KUBE_LIST_IDENTIFIER not in self.kind:\n assert self.name is not None, \"No name found\"\n\n assert self.kind is not None, \"No kind found\"\n assert self.api_version is not None, \"No apiVersion found\"\n\n def get(self, *args, **kwargs):\n \"\"\"Pass get calls to the objects definition\"\"\"\n return self.definition.get(*args, **kwargs)\n\n def __str__(self):\n return f\"{self.api_version}/{self.kind}/{self.name}\"\n\n def __repr__(self):\n return str(self)\n\n def __hash__(self):\n \"\"\"Hash explicitly excludes the definition so that the object's\n identifier in a map can be based only on the unique identifier of the\n resource in the cluster. If the original resource did not provide a unique\n identifier then use the apiVersion, kind, and name\n \"\"\"\n return hash(self.metadata.get(\"uid\", str(self)))\n\n def __eq__(self, other):\n return hash(self) == hash(other)" }, { "identifier": "ReconcileManager", "path": "oper8/reconcile.py", "snippet": "class ReconcileManager: # pylint: disable=too-many-lines\n \"\"\"This class manages reconciliations for an instance of Oper8. It's\n primary function is to run reconciles given a CR manifest, Controller,\n and the current cluster state via a DeployManager.\n \"\"\"\n\n ## Construction ############################################################\n\n def __init__(\n self,\n home_dir: str = None,\n deploy_manager: Optional[DeployManagerBase] = None,\n enable_vcs: Optional[bool] = None,\n reimport_controller: Optional[bool] = True,\n ):\n \"\"\"The constructor sets up the properties used across every\n reconcile and checks that the current config is valid.\n\n Args:\n home_dir: Optional[str]=None\n The root directory for importing controllers or VCS checkout\n deploy_manager: Optional[DeployManager]=None\n Deploy manager to use. 
If not given, a new DeployManager will\n be created for each reconcile.\n enable_vcs: Optional[bool]=True\n Parameter to manually control the state of VCS on a per instance\n basis\n reimport_controller: Optional[bool]=None\n Parameter to manually control if a controller needs to be reimported each\n reconcile.\n \"\"\"\n\n if home_dir:\n self.home_dir = home_dir\n elif config.vcs.enabled:\n self.home_dir = config.vcs.repo\n else:\n self.home_dir = os.getcwd()\n\n self.vcs = None\n\n # If enable_vcs is not provided than default to\n # config\n if enable_vcs is None:\n enable_vcs = config.vcs.enabled\n\n if enable_vcs:\n assert_config(\n config.vcs.repo,\n \"Can not enable vcs without supply source repo at vcs.repo\",\n )\n assert_config(\n config.vcs.dest,\n \"Cannot require enable vcs without providing a destination\",\n )\n vcs_checkout_methods = [method.value for method in VCSCheckoutMethod]\n assert_config(\n config.vcs.checkout_method in vcs_checkout_methods,\n f\"VCS checkout method must be one of the following {vcs_checkout_methods}\",\n )\n\n self.vcs = VCS(self.home_dir)\n\n # Ensure config is setup correctly for strict_versioning\n if config.strict_versioning:\n assert_config(\n config.supported_versions is not None,\n \"Must provide supported_versions with strict_versioning=True\",\n )\n assert_config(\n config.vcs.field is not None,\n \"Must provide vcs.field with strict_versioning=True\",\n )\n\n self.deploy_manager = deploy_manager\n self.reimport_controller = reimport_controller\n\n ## Reconciliation ############################################################\n\n @alog.logged_function(log.info)\n @alog.timed_function(log.info, \"Reconcile finished in: \")\n def reconcile(\n self,\n controller_info: CONTROLLER_INFO,\n resource: Union[dict, aconfig.Config],\n is_finalizer: bool = False,\n ) -> ReconciliationResult:\n \"\"\"This is the main entrypoint for reconciliations and contains the\n core implementation. The general reconcile path is as follows:\n\n 1. Parse the raw CR manifest\n 2. Setup logging based on config with overrides from CR\n 3. Check if the CR is paused and for strict versioning\n 4. Setup directory if VCS is enabled\n 5. Import and construct the Controller\n 6. Setup the DeployManager and Session objects\n 7. Run the Controller reconcile\n\n Args:\n controller_info: CONTROLLER_INFO\n The description of a controller. See CONTROLLER_INFO for\n more information\n resource: Union[dict, aconfig.Config]\n A raw representation of the resource to be reconciled\n is_finalizer: bool=False\n Whether the resource is being deleted\n\n Returns:\n reconcile_result: ReconciliationResult\n The result of the reconcile\n \"\"\"\n\n # Parse the full CR content\n cr_manifest = self.parse_manifest(resource)\n\n # generate id unique to this session\n reconcile_id = self.generate_id()\n\n # Initialize logging prior to any other work\n self.configure_logging(cr_manifest, reconcile_id)\n\n # If paused, do nothing and don't requeue\n if self._is_paused(cr_manifest):\n log.info(\"CR is paused. 
Exiting reconciliation\")\n result = ReconciliationResult(requeue=False, requeue_params=RequeueParams())\n return result\n\n # Check strict versioning before continuing\n if config.strict_versioning:\n self._check_strict_versioning(cr_manifest)\n\n # Check if VCS is enabled and then attempt to checkout\n if config.vcs.enabled:\n self.setup_vcs(cr_manifest)\n\n # Import controller and setup the instance\n controller = self.setup_controller(controller_info)\n\n # Configure deploy manager on a per reconcile basis for\n # owner references unless a manager is provided on initialization\n deploy_manager = self.setup_deploy_manager(cr_manifest)\n\n # Setup Session\n session = self.setup_session(\n controller, cr_manifest, deploy_manager, reconcile_id\n )\n\n # Run the controller reconcile\n result = self.run_controller(controller, session, is_finalizer)\n\n return result\n\n def safe_reconcile(\n self,\n controller_info: CONTROLLER_INFO,\n resource: dict,\n is_finalizer: bool = False,\n ) -> ReconciliationResult:\n \"\"\"\n This function calls out to reconcile but catches any errors thrown. This\n function guarantees a safe result which is needed by some Watch Managers\n\n Args:\n controller_info: CONTROLLER_INFO\n The description of a controller. See CONTROLLER_INFO for\n more information\n resource: Union[dict, aconfig.Config]\n A raw representation of the reconcile\n is_finalize: bool=False\n Whether the resource is being deleted\n\n Returns:\n reconcile_result: ReconciliationResult\n The result of the reconcile\n\n \"\"\"\n\n try:\n return self.reconcile(controller_info, resource, is_finalizer)\n\n # VCSMultiProcessError is an expected error caused by oper8 which should\n # not be handled by the exception handling code\n except VCSMultiProcessError as exc:\n # Requeue after ~7.5 seconds. Add randomness to avoid\n # repeated conflicts\n requeue_time = 5 + random.uniform(0, 5)\n params = RequeueParams(\n requeue_after=datetime.timedelta(seconds=requeue_time)\n )\n log.debug(\"VCS Multiprocessing Error Detected: {%s}\", exc, exc_info=True)\n log.warning(\n \"VCS Setup failed due to other process. 
Requeueing in %ss\",\n requeue_time,\n )\n return ReconciliationResult(\n requeue=True, requeue_params=params, exception=exc\n )\n\n # Capture all generic exceptions\n except Exception as exc: # pylint: disable=broad-except\n log.warning(\"Handling caught error in reconcile: %s\", exc, exc_info=True)\n error = exc\n\n if config.manage_status:\n try:\n self._update_error_status(resource, error)\n log.debug(\"Update CR status with error message\")\n except Exception as exc: # pylint: disable=broad-except\n log.error(\"Failed to update status: %s\", exc, exc_info=True)\n\n # If we got to this return it means there was an\n # exception during reconcile and we should requeue\n # with the default backoff period\n log.info(\"Requeuing CR due to error during reconcile\")\n return ReconciliationResult(\n requeue=True, requeue_params=RequeueParams(), exception=error\n )\n\n ## Reconciliation Stages ############################################################\n\n @classmethod\n def parse_manifest(cls, resource: Union[dict, aconfig.Config]) -> aconfig.Config:\n \"\"\"Parse a raw resource into an aconfig Config\n\n Args:\n resource: Union[dict, aconfig.Config])\n The resource to be parsed into a manifest\n\n Returns\n cr_manifest: aconfig.Config\n The parsed and validated config\n \"\"\"\n try:\n cr_manifest = aconfig.Config(resource, override_env_vars=False)\n except (ValueError, SyntaxError, AttributeError) as exc:\n raise ValueError(\"Failed to parse full_cr\") from exc\n\n return cr_manifest\n\n @classmethod\n def configure_logging(cls, cr_manifest: aconfig.Config, reconciliation_id: str):\n \"\"\"Configure the logging for a given reconcile\n\n Args:\n cr_manifest: aconfig.Config\n The resource to get annotation overrides from\n reconciliation_id: str\n The unique id for the reconciliation\n \"\"\"\n\n # Fetch the annotations for logging\n # NOTE: We use safe fetching here because this happens before CR\n # verification in the Session constructor\n annotations = cr_manifest.get(\"metadata\", {}).get(\"annotations\", {})\n default_level = annotations.get(\n constants.LOG_DEFAULT_LEVEL_NAME, config.log_level\n )\n\n filters = annotations.get(constants.LOG_FILTERS_NAME, config.log_filters)\n log_json = annotations.get(constants.LOG_JSON_NAME, str(config.log_json))\n log_thread_id = annotations.get(\n constants.LOG_THREAD_ID_NAME, str(config.log_thread_id)\n )\n\n # Convert boolean args\n log_json = (log_json or \"\").lower() == \"true\"\n log_thread_id = (log_thread_id or \"\").lower() == \"true\"\n\n # Keep the old handler. 
This is useful if running with ansible as\n # it will preserve the handler generator set up to log to a file\n # since ansible captures all logging output\n handler_generator = None\n if logging.root.handlers:\n old_handler = logging.root.handlers[0]\n\n def handler_generator():\n return old_handler\n\n alog.configure(\n default_level=default_level,\n filters=filters,\n formatter=Oper8JsonFormatter(cr_manifest, reconciliation_id)\n if log_json\n else \"pretty\",\n thread_id=log_thread_id,\n handler_generator=handler_generator,\n )\n\n @classmethod\n def generate_id(cls) -> str:\n \"\"\"Generates a unique human readable id for this reconciliation\n\n Returns:\n id: str\n A unique base32 encoded id\n \"\"\"\n uuid4 = uuid.uuid4()\n base32_str = base64.b32encode(uuid4.bytes).decode(\"utf-8\")\n reconcile_id = base32_str[:22]\n log.debug(\"Generated reconcile id: %s\", reconcile_id)\n return reconcile_id\n\n def setup_vcs(self, cr_manifest: aconfig.Config):\n \"\"\"Setups the VCS directory and sys.path for a reconcile.\n This function also ensures that the version is valid if\n config.strict_versioning is enabled.\n\n Args:\n cr_manifest: aconfig.Config\n The cr manifest to pull the requested version from.\n \"\"\"\n version = get_manifest_version(cr_manifest)\n if not version:\n raise ValueError(\"CR Manifest has no version\")\n\n log.debug(\n \"Setting up working directory with src: %s and version: %s\",\n self.home_dir,\n version,\n )\n working_dir = self._setup_directory(cr_manifest, version)\n\n # Construct working dir path from vcs and git directory\n if config.vcs.module_dir:\n module_path = pathlib.Path(config.vcs.module_dir)\n working_dir = working_dir / module_path\n\n if not working_dir.is_dir():\n log.error(\n \"Working directory %s could not be found. Invalid module path\",\n working_dir,\n )\n raise ConfigError(\n f\"Module path: '{module_path}' could not be found in repository\"\n )\n\n log.debug4(\"Changing working directory to %s\", working_dir)\n os.chdir(working_dir)\n sys.path.insert(0, str(working_dir))\n\n def setup_controller(\n self, controller_info: CONTROLLER_INFO\n ) -> CONTROLLER_CLASS_TYPE:\n \"\"\"\n Import the requested Controller class and enable any compatibility layers\n\n Args:\n controller_info:CONTROLLER_INFO\n The description of a controller. 
See CONTROLLER_INFO for\n more information\n Returns:\n controller:\n The required Controller Class\n \"\"\"\n\n # Local\n from .controller import ( # pylint: disable=import-outside-toplevel, cyclic-import\n Controller,\n )\n\n # If controller info is already a constructed controller then\n # skip importing\n if isinstance(controller_info, Controller):\n return controller_info\n\n controller_class = self._import_controller(controller_info)\n return self._configure_controller(controller_class)\n\n def setup_deploy_manager(self, cr_manifest: aconfig.Config) -> DeployManagerBase:\n \"\"\"\n Configure a deploy_manager for a reconcile given a manifest\n\n Args:\n cr_manifest: aconfig.Config\n The resource to be used as an owner_ref\n\n Returns:\n deploy_manager: DeployManagerBase\n The deploy_manager to be used during reconcile\n \"\"\"\n if self.deploy_manager:\n return self.deploy_manager\n\n if config.dry_run:\n log.debug(\"Using DryRunDeployManager\")\n return DryRunDeployManager()\n\n log.debug(\"Using OpenshiftDeployManager\")\n return OpenshiftDeployManager(owner_cr=cr_manifest)\n\n def setup_session(\n self,\n controller: CONTROLLER_TYPE,\n cr_manifest: aconfig.Config,\n deploy_manager: DeployManagerBase,\n reconciliation_id: str,\n ) -> Session:\n \"\"\"Construct the session, including gathering the backend config and any temp patches\n\n Args:\n controller: Controller\n The controller class being reconciled\n cr_manifest: aconfig.Config\n The resource being reconciled\n deploy_manager: DeployManagerBase\n The deploy manager used in the cluster\n reconciliation_id: str\n The id for the reconcile\n\n Return:\n session: Session\n The session for reconcile\n \"\"\"\n # Get backend config for reconciliation\n controller_defaults = controller.get_config_defaults()\n reconciliation_config = self._get_reconcile_config(\n cr_manifest=cr_manifest,\n deploy_manager=deploy_manager,\n controller_defaults=controller_defaults,\n )\n log.debug4(\"Gathered Config: %s\", reconciliation_config)\n\n # Get Temporary patches\n patches = self._get_temp_patches(deploy_manager, cr_manifest)\n log.debug3(\"Found %d patches\", len(patches))\n\n # Get the complete CR Manifest including defaults\n cr_manifest_defaults = controller.get_cr_manifest_defaults()\n full_cr_manifest = merge_configs(\n aconfig.Config(cr_manifest_defaults),\n cr_manifest,\n )\n\n return Session(\n reconciliation_id=reconciliation_id,\n cr_manifest=full_cr_manifest,\n config=reconciliation_config,\n deploy_manager=deploy_manager,\n temporary_patches=patches,\n )\n\n def run_controller(\n self, controller: CONTROLLER_TYPE, session: Session, is_finalizer: bool\n ) -> ReconciliationResult:\n \"\"\"Run the Controller's reconciliation or finalizer with the constructed Session.\n This function also updates the CR status and handles requeue logic.\n\n Args:\n controller: Controller\n The Controller being reconciled\n session: Session\n The current Session state\n is_finalizer:\n Whether the resource is being deleted\n\n Returns:\n reconciliation_result: ReconciliationResult\n The result of the reconcile\n \"\"\"\n log.info(\n \"%s resource %s/%s/%s\",\n \"Finalizing\" if is_finalizer else \"Reconciling\",\n session.kind,\n session.namespace,\n session.name,\n )\n\n # Ensure the resource has the proper finalizers\n if controller.has_finalizer:\n add_finalizer(session, controller.finalizer)\n\n # Update the Resource status\n if config.manage_status:\n self._update_reconcile_start_status(session)\n\n # Reconcile the controller\n completion_state = 
controller.run_reconcile(\n session,\n is_finalizer=is_finalizer,\n )\n\n if config.manage_status:\n self._update_reconcile_completion_status(session, completion_state)\n\n # Check if the controller session should requeue\n requeue, requeue_params = controller.should_requeue(session)\n if not requeue_params:\n requeue_params = RequeueParams()\n\n # Remove managed finalizers if not requeuing\n if not requeue and is_finalizer and controller.has_finalizer:\n remove_finalizer(session, controller.finalizer)\n\n return ReconciliationResult(requeue=requeue, requeue_params=requeue_params)\n\n ## Implementation Details ############################################################\n\n @classmethod\n def _is_paused(cls, cr_manifest: aconfig.Config) -> bool:\n \"\"\"Check if a manifest has a paused annotation\n\n Args:\n cr_manifest: aconfig.Config\n The manifest becking checked\n\n Returns:\n is_paused: bool\n If the manifest contains the paused annotation\n \"\"\"\n annotations = cr_manifest.metadata.get(\"annotations\", {})\n paused = annotations.get(constants.PAUSE_ANNOTATION_NAME)\n return paused and paused.lower() == \"true\"\n\n def _check_strict_versioning(self, cr_manifest: aconfig.Config):\n \"\"\"Check the version against config and vcs directory\n\n Args:\n version_directory: str\n The repo directory to check\n version: str\n The version from the manifest\n \"\"\"\n version = get_manifest_version(cr_manifest)\n if not version:\n raise ValueError(\"CR Manifest has no version\")\n\n # Ensure version is in list of supported versions\n assert_config(\n version in config.supported_versions,\n f\"Unsupported version: {version}\",\n )\n\n # If VCS is enabled ensure the branch or tag exists\n if self.vcs:\n repo_versions = self.vcs.list_refs()\n assert_config(\n version in repo_versions,\n f\"Version not found in repo: {version}\",\n )\n log.debug3(\"Supported VCS Versions: %s\", repo_versions)\n\n def _setup_directory(\n self, cr_manifest: aconfig.Config, version: str\n ) -> pathlib.Path:\n \"\"\"Construct the VCS directory from the cr_manifest and version. 
Then\n checkout the directory\n\n Args:\n cr_manifest: aconfig.Config\n The manifest to be used for the checkout path\n version: str\n The version to checkout\n\n Returns:\n destination_directory: pathlib.Path\n The destination directory for the checkout\n \"\"\"\n\n # Generate checkout directory and ensure path exists\n def sanitize_for_path(path):\n keepcharacters = (\" \", \".\", \"_\")\n return \"\".join(\n c for c in path if c.isalnum() or c in keepcharacters\n ).rstrip()\n\n # Setup destination templating to allow for CR specific checkout paths\n # The entirety of the cr_manifest is included as a dict as well as some\n # custom keys\n template_mappings = {\n # Include the entire dict first so that the sanitized default values\n # take precedence\n **cr_manifest,\n \"version\": version,\n \"kind\": sanitize_for_path(cr_manifest.kind),\n \"apiVersion\": sanitize_for_path(cr_manifest.apiVersion),\n \"namespace\": sanitize_for_path(cr_manifest.metadata.namespace),\n \"name\": sanitize_for_path(cr_manifest.metadata.name),\n }\n\n # Get the checkout directory and method\n try:\n formatted_path = config.vcs.dest.format(**template_mappings)\n except KeyError as exc:\n log.warning(\n \"Invalid key: %s found in vcs destination template\", exc, exc_info=True\n )\n raise ConfigError(\"Invalid Key found in vcs destination template\") from exc\n\n checkout_dir = pathlib.Path(formatted_path)\n checkout_method = VCSCheckoutMethod(config.vcs.checkout_method)\n\n log.debug2(\n \"Checking out into directory %s with method %s\",\n checkout_dir,\n checkout_method.value,\n )\n self.vcs.checkout_ref(version, checkout_dir, checkout_method)\n return checkout_dir\n\n def _import_controller(\n self, controller_info: CONTROLLER_INFO\n ) -> CONTROLLER_CLASS_TYPE:\n \"\"\"Parse the controller info and reimport the controller\n\n Args:\n controller_info:CONTROLLER_INFO\n The description of a controller. See CONTROLLER_INFO for\n more information\n Returns:\n controller_class: Type[Controller]\n The reimported Controller\n\n \"\"\"\n log.debug2(\"Parsing controller_info\")\n if isinstance(controller_info, str):\n class_module_parts = controller_info.rsplit(\".\", maxsplit=1)\n assert_config(\n len(class_module_parts) == 2,\n f\"Invalid controller_class [{controller_info}]. 
Format is <module>.<class>\",\n )\n module_name, class_name = class_module_parts\n else:\n class_name = controller_info.__name__\n module_name = controller_info.__module__\n\n # Reimport module if reimporting is enabled and if it already exists\n if self.reimport_controller and module_name in sys.modules:\n log.debug2(\"UnImporting controller module: %s\", module_name)\n sys.modules.pop(module_name)\n\n # UnImport the controller and any parent modules\n # so controller can be reimported from the most\n # recent sys path\n module_parts = module_name.split(\".\")\n for i in range(1, len(module_parts)):\n parent_module = \".\".join(module_parts[:-i])\n if parent_module in sys.modules:\n log.debug3(\"UnImporting module: %s\", parent_module)\n sys.modules.pop(parent_module, None)\n\n log.debug2(\"Attempting to import [%s.%s]\", module_name, class_name)\n\n # Attempt to import the module\n try:\n app_module = importlib.import_module(module_name)\n if not hasattr(app_module, class_name):\n raise ConfigError(\n f\"Invalid controller_class [{class_name}].\"\n f\" Class not found in module [{module_name}]\"\n )\n controller_class = getattr(app_module, class_name)\n\n # Import controller in function to avoid circular imports\n # Local\n from .controller import ( # pylint: disable=import-outside-toplevel\n Controller,\n )\n\n if not issubclass(controller_class, Controller):\n raise ConfigError(\n f\"Invalid controller_class [{module_name}.{class_name}].\"\n f\" [{class_name}] is not a Controller\"\n )\n\n except ImportError as exc:\n log.error(\n \"Failed to import [%s.%s]. Failed to import [%s]\",\n module_name,\n class_name,\n module_name,\n exc_info=True,\n )\n raise ConfigError(\"Invalid Controller Class Specified\") from exc\n\n log.debug(\n \"Imported Controller %s from file %s\",\n controller_class,\n sys.modules[controller_class.__module__].__file__,\n )\n\n return controller_class\n\n def _configure_controller(\n self, controller_class: CONTROLLER_CLASS_TYPE\n ) -> CONTROLLER_TYPE:\n \"\"\"Construct the Controller Class\n\n Args:\n controller_class: CONTROLLER_CLASS_TYPE\n The Controller class to be reconciled\n\n Returns:\n controller: Controller\n The constructed Controller\n\n \"\"\"\n log.debug3(\"Constructing controller\")\n controller = controller_class()\n return controller\n\n def _get_reconcile_config(\n self,\n cr_manifest: aconfig.Config,\n deploy_manager: DeployManagerBase,\n controller_defaults: aconfig.Config,\n ) -> aconfig.Config:\n \"\"\"Construct the flattened backend config for this reconciliation\n starting with a deepcopy of the base and merge in overrides from the CR\n\n Args:\n cr_manifest: aconfig.Config:\n The manifest to get overrides from\n deploy_manager: DeployManagerBase:\n The deploy manager to get the default configmap config\n controller_defaults: aconfig.Config:\n The config defaults provided by the controller class\n\n Returns:\n reconcile_config: aconfig.Config\n The reconciliation config\n \"\"\"\n metadata = cr_manifest.get(\"metadata\", {})\n annotations = metadata.get(\"annotations\", {})\n namespace = metadata.get(\"namespace\")\n cr_config_defaults = cr_manifest.get(constants.CONFIG_OVERRIDES, {})\n annotation_config_defaults = {}\n if constants.CONFIG_DEFAULTS_ANNOTATION_NAME in annotations:\n log.debug(\"Pulling config_defaults based on annotation\")\n config_defaults_name = annotations[\n constants.CONFIG_DEFAULTS_ANNOTATION_NAME\n ]\n\n # Allow sub-keys to be deliniated by \"/\"\n parts = config_defaults_name.split(\"/\")\n config_defaults_cm_name 
= parts[0]\n\n log.debug2(\n \"Reading config_defaults from ConfigMap [%s]\", config_defaults_cm_name\n )\n success, content = deploy_manager.get_object_current_state(\n kind=\"ConfigMap\",\n name=config_defaults_cm_name,\n namespace=namespace,\n api_version=\"v1\",\n )\n assert_cluster(success, \"Failed to look up config defaults form ConfigMap\")\n assert_config(\n content is not None,\n f\"Did not find configured config defaults ConfigMap: {config_defaults_cm_name}\",\n )\n assert_config(\"data\" in content, \"Got ConfigMap content with out 'data'\")\n config_defaults_content = content[\"data\"]\n assert_config(\n isinstance(config_defaults_content, dict),\n f\"Incorrectly formatted config_defaults ConfigMap: {config_defaults_cm_name}\",\n )\n\n # Parse as a Config\n log.debug2(\"Parsing app config dict\")\n annotation_config_defaults = aconfig.Config(\n config_defaults_content, override_env_vars=False\n )\n\n return merge_configs(\n copy.deepcopy(controller_defaults),\n merge_configs(annotation_config_defaults, cr_config_defaults),\n )\n\n def _get_temp_patches( # pylint: disable=too-many-locals\n self, deploy_manager: DeployManagerBase, cr_manifest: aconfig.Config\n ) -> List[aconfig.Config]:\n \"\"\"Fetch the ordered list of temporary patches that should apply to this\n rollout.\n\n Args:\n deploy_manager: DeployManagerBase\n The DeployManager used to get the current temporary patches\n cr_manifest: aconfig.Config\n The manifest of this reconciliation\n \"\"\"\n\n # Look for patch annotations on the CR\n patch_annotation = (\n cr_manifest.get(\"metadata\", {})\n .get(\"annotations\", {})\n .get(constants.TEMPORARY_PATCHES_ANNOTATION_NAME, \"{}\")\n )\n log.debug3(\"Raw patch annotation: %s\", patch_annotation)\n try:\n raw_patches = json.loads(patch_annotation)\n if not isinstance(raw_patches, dict):\n msg = f\"Patches annotation not a dict: {raw_patches}\"\n log.error(msg)\n raise RolloutError(msg)\n patches = {}\n for patch_name, patch_meta in raw_patches.items():\n patch_meta[\"timestamp\"] = dateutil.parser.parse(patch_meta[\"timestamp\"])\n patches[patch_name] = patch_meta\n if \"api_version\" not in patch_meta:\n raise KeyError(\"api_version\")\n except json.decoder.JSONDecodeError as err:\n msg = f\"Could not parse patches from annotation [{patch_annotation}]\"\n log.error(msg)\n raise RolloutError(msg) from err\n except dateutil.parser.ParserError as err:\n msg = f\"Failed to parse patch timestamp [{patch_annotation}]\"\n log.error(msg)\n raise RolloutError(msg) from err\n except KeyError as err:\n msg = f\"Patch meta incorrectly formatted [{patch_annotation}]\"\n log.error(msg)\n raise RolloutError(msg) from err\n\n # Fetch the state of each patch and add it to the output, sorted by\n # timestamp with the earliest first\n temporary_patches = []\n for patch_name, patch_meta in sorted(\n list(patches.items()), key=lambda x: x[1][\"timestamp\"]\n ):\n # Do the fetch\n log.debug2(\"Fetching patch [%s/%s]\", patch_name, patch_meta[\"timestamp\"])\n namespace = cr_manifest.get(\"metadata\", {}).get(\"namespace\")\n patch_api_version = patch_meta[\"api_version\"]\n patch_kind = patch_meta.get(\"kind\", \"TemporaryPatch\")\n success, content = deploy_manager.get_object_current_state(\n kind=patch_kind,\n name=patch_name,\n api_version=patch_api_version,\n namespace=namespace,\n )\n assert_cluster(success, f\"Failed to fetch patch content for [{patch_name}]\")\n assert_config(content is not None, f\"Patch not found [{patch_name}]\")\n\n # Pull the patch spec and add it to the list\n 
assert_config(\n content.get(\"spec\") is not None,\n f\"No spec found in patch [{patch_name}]\",\n )\n temporary_patches.append(aconfig.Config(content, override_env_vars=False))\n\n return temporary_patches\n\n ## Status Details ############################################################\n\n def _update_resource_status(\n self, deploy_manager: DeployManagerBase, manifest: aconfig.Config, **kwargs\n ) -> dict:\n \"\"\"Helper function to update the status of a resource given a deploy_manager, manifest\n and status kwargs\n\n Args:\n deploy_manager: DeployManagerBase\n The DeployManager used to update the resource\n manifest: aconfig.Config\n The manifest of the resource being updated\n **kwargs:\n The key word arguments passed to update_resource_status\n\n Returns:\n updated_status: dict\n The updated status applied to the resource\n \"\"\"\n return status.update_resource_status(\n deploy_manager,\n manifest.kind,\n manifest.api_version,\n manifest.metadata.name,\n manifest.metadata.namespace,\n **kwargs,\n )\n\n def _update_reconcile_start_status(self, session: Session):\n \"\"\"Update the status for a resource at the start of a reconciliation\n\n Args:\n session: Session\n The session of the reconcile which includes the DeployManager and resource\n\n \"\"\"\n ready_condition = status.get_condition(status.READY_CONDITION, session.status)\n ready_reason = ready_condition.get(\"reason\")\n if ready_reason is None or session.current_version is None:\n ready_reason = status.ReadyReason.INITIALIZING\n\n optional_kwargs = {}\n if session.current_version and session.version != session.current_version:\n log.debug(\n \"Version change detected: %s -> %s\",\n session.current_version,\n session.version,\n )\n optional_kwargs = {\n \"updating_reason\": status.UpdatingReason.VERSION_CHANGE,\n \"updating_message\": \"Version Change Started: \"\n f\"[{session.current_version}] -> [{session.version}]\",\n }\n ready_reason = status.ReadyReason.IN_PROGRESS\n\n self._update_resource_status(\n session.deploy_manager,\n session.cr_manifest,\n ready_reason=ready_reason,\n ready_message=ready_condition.get(\"message\", \"Initial Rollout Started\"),\n supported_versions=config.supported_versions,\n **optional_kwargs,\n )\n\n def _update_reconcile_completion_status(\n self, session: Session, completion_state: CompletionState\n ):\n \"\"\"Perform CR status updates based on the results of the rollout steps. The status logic is\n as follows:\n 1. Initial Rollout: Ready-INITIALIZING, Updating-VERIFY_WAIT\n 2. Everything complete: Ready-STABLE, Updating-STABLE\n 3. Everything except after_verify: Ready-IN_PROGRESS, Updating-STABLE\n 4. 
other: Updating-VERIFY_WAIT\n\n Args:\n session: Session\n The session of the reconcile which includes the DeployManager and resource\n completion_state: CompletionState\n The result of the rollout\n \"\"\"\n status_update = {\"component_state\": completion_state}\n\n # If everything completed and verified, set ready and updating to STABLE\n # and set the status's reconciled version to the desired version\n if completion_state.verify_completed():\n status_update[\"ready_reason\"] = status.ReadyReason.STABLE\n status_update[\"ready_message\"] = \"Verify Complete\"\n status_update[\"updating_reason\"] = status.UpdatingReason.STABLE\n status_update[\"updating_message\"] = \"Rollout Complete\"\n status_update[\"version\"] = session.version\n\n # If the completion_state didn't fail then update the ready condition with\n # in_progress and the updating condition with verification incomplete\n else:\n current_status = session.get_status()\n\n # If not initializing then update the ready condition with in_progress\n current_ready_cond = status.get_condition(\n status.READY_CONDITION, current_status\n )\n if (\n current_ready_cond.get(\"reason\")\n != status.ReadyReason.INITIALIZING.value\n ):\n status_update[\"ready_reason\"] = status.ReadyReason.IN_PROGRESS\n status_update[\"ready_message\"] = \"Verify InProgress\"\n\n status_update[\"updating_reason\"] = status.UpdatingReason.VERIFY_WAIT\n status_update[\"updating_message\"] = \"Component verification incomplete\"\n\n log.debug3(\"Updating status after reconcile: %s\", status_update)\n self._update_resource_status(\n session.deploy_manager, session.cr_manifest, **status_update\n )\n\n def _update_error_status(\n self, resource: Union[dict, aconfig.Config], error: Exception\n ) -> dict:\n \"\"\"Update the status of a resource after an error occurred. This function\n setups up it's own deploy manager and parses the resource. 
This way errors at any\n phase of reconciliation can still get updated\n\n Args:\n resource: Union[dict, aconfig.Config]\n The resource that's status is being updated\n error: Exception\n The exception that stopped the reconciliation\n\n Returns:\n status: dict\n The updated status after the error message\n \"\"\"\n cr_manifest = self.parse_manifest(resource)\n deploy_manager = self.setup_deploy_manager(resource)\n\n # Get the completion state if possible\n component_state = getattr(error, \"completion_state\", None)\n\n # Expected Oper8 Errors\n if isinstance(error, PreconditionError):\n status_update = {\n \"updating_reason\": status.UpdatingReason.PRECONDITION_WAIT,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n elif isinstance(error, (VerificationError, Oper8ExpectedError)):\n status_update = {\n \"updating_reason\": status.UpdatingReason.VERIFY_WAIT,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n elif isinstance(error, ConfigError):\n status_update = {\n \"ready_reason\": status.ReadyReason.CONFIG_ERROR,\n \"ready_message\": str(error),\n \"updating_reason\": status.UpdatingReason.ERRORED,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n elif isinstance(error, ClusterError):\n status_update = {\n \"updating_reason\": status.UpdatingReason.CLUSTER_ERROR,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n\n elif isinstance(error, (RolloutError, Oper8FatalError)):\n status_update = {\n \"ready_reason\": status.ReadyReason.ERRORED,\n \"ready_message\": str(error),\n \"updating_reason\": status.UpdatingReason.ERRORED,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n\n # Catchall for non oper8 errors\n else:\n status_update = {\n \"ready_reason\": status.ReadyReason.ERRORED,\n \"ready_message\": str(error),\n \"updating_reason\": status.UpdatingReason.ERRORED,\n \"updating_message\": str(error),\n }\n\n return self._update_resource_status(\n deploy_manager, cr_manifest, **status_update\n )" }, { "identifier": "READY_CONDITION", "path": "oper8/status.py", "snippet": "READY_CONDITION = \"Ready\"" }, { "identifier": "get_condition", "path": "oper8/status.py", "snippet": "def get_condition(type_name: str, current_status: dict) -> dict:\n \"\"\"Extract the given condition type from a status object\n\n Args:\n type: str\n The condition type to fetch\n current_status: dict\n The dict representation of the status for a given application\n\n Returns:\n condition: dict\n The condition object if found, empty dict otherwise\n \"\"\"\n cond = [\n cond\n for cond in current_status.get(\"conditions\", [])\n if cond.get(\"type\") == type_name\n ]\n if cond:\n assert len(cond) == 1, f\"Found multiple condition entries for {type_name}\"\n return cond[0]\n return {}" }, { "identifier": "abstractclassproperty", "path": "oper8/utils.py", "snippet": "class abstractclassproperty: # pylint: disable=invalid-name,too-few-public-methods\n \"\"\"This decorator implements a classproperty that will raise when accessed\"\"\"\n\n def __init__(self, func):\n self.prop_name = func.__name__\n\n def __get__(self, *args):\n # If this is being called by __setattr__, we're ok because it's\n # apptempting to set the attribute on the class\n curframe = inspect.currentframe()\n callframe = inspect.getouterframes(curframe, 2)[1]\n caller_name = callframe[3]\n if caller_name == \"__setattr__\":\n return None\n\n # If this is a help() call or a pdoc documentation 
request, return an\n # object with a docstring indicating that the property is abstract\n if (\n \"help\" in callframe.frame.f_code.co_names\n or callframe.frame.f_globals[\"__name__\"] == \"pdoc\"\n ):\n\n class AbstractClassProperty: # pylint: disable=missing-class-docstring\n __slots__ = []\n __doc__ = f\"\"\"The <{self.prop_name}> property is an abstract class property\n that must be overwritten in derived children\n \"\"\"\n\n return AbstractClassProperty\n\n raise NotImplementedError(\n f\"Cannot access abstractclassproperty {self.prop_name}\"\n )" }, { "identifier": "obj_to_hash", "path": "oper8/watch_manager/python_watch_manager/utils/common.py", "snippet": "def obj_to_hash(obj: Any) -> str:\n \"\"\"Get the hash of any jsonable python object\n\n Args:\n obj: Any\n The object to hash\n\n Returns:\n hash: str\n The hash of obj\n \"\"\"\n return hash(json.dumps(obj, sort_keys=True))" }, { "identifier": "RESERVED_PLATFORM_ANNOTATIONS", "path": "oper8/watch_manager/python_watch_manager/utils/constants.py", "snippet": "RESERVED_PLATFORM_ANNOTATIONS = [\n \"k8s.io\",\n \"kubernetes.io\",\n \"openshift.io\",\n]" }, { "identifier": "RESOURCE_VERSION_KEEP_COUNT", "path": "oper8/watch_manager/python_watch_manager/utils/constants.py", "snippet": "RESOURCE_VERSION_KEEP_COUNT = 20" } ]
from abc import ABC, abstractmethod from collections import deque from typing import Optional from ....deploy_manager import KubeEventType from ....managed_object import ManagedObject from ....reconcile import ReconcileManager from ....status import READY_CONDITION, get_condition from ....utils import abstractclassproperty from ..utils import ( RESERVED_PLATFORM_ANNOTATIONS, RESOURCE_VERSION_KEEP_COUNT, obj_to_hash, ) import alog
11,329
The test result """ result = self.test(resource, event) if result is not None and not result: log.debug3( "Failed filter: %s with return val %s", self, result, extra={"resource": resource}, ) self.update(resource) return result ## Generic Resource filters class CreationDeletionFilter(Filter): """Filter to ensure reconciliation on creation and deletion events""" def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if event is ADDED or DELETED""" # Ignore non Added/Deleted Events if event not in [KubeEventType.ADDED, KubeEventType.DELETED]: return return True class GenerationFilter(Filter): """Filter for reconciling on generation changes for resources that support it""" def __init__(self, resource: ManagedObject): """Set generation instance variable""" super().__init__(resource) self.generation = None def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if resource generation is different than before""" # Only update&test resources with a generation if not self.generation: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Test if new generation is different return self.generation != resource.metadata.get("generation") def update(self, resource: ManagedObject): """Update the currently observed generation""" self.generation = resource.metadata.get("generation") class NoGenerationFilter(Filter): """Filter for reconciling changes to spec on resources that don't support the generation field like pods. It does this by hashing the objects excluding status and metadata""" def __init__(self, resource: ManagedObject): """Check if resource supports generation and initialize the hash dict""" self.supports_generation = resource.metadata.get("generation") is not None self.resource_hashes = {} super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return True if a resources current hash differs from the current""" # Don't test resources that support generation or if we don't have hashes yet if self.supports_generation or not self.resource_hashes: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Check each stored resource hash to see if its # changed for key, obj_has in self.resource_hashes.items(): if obj_has != obj_to_hash(resource.get(key)): log.debug2("Detected change in %s", key) return True return False def update(self, resource: ManagedObject): """Update the observed spec hashes""" if self.supports_generation: return # Get the default hashes for all object keys except metadata # and status for key, obj in resource.definition.items(): if key in ["metadata", "status", "kind", "apiVersion"]: continue self.resource_hashes[key] = obj_to_hash(obj) class ResourceVersionFilter(Filter): """Filter for duplicate resource versions which happens when restarting a watch connection""" def __init__(self, resource: ManagedObject): """Initialize the resource version list""" # Use a dequeue instead of a list/set to set a bound on the number # of tracked versions
""" Filters are used to limit the amount of events being reconciled by a watch manager This is based off of the kubernetes controller runtime's "predicates": https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/predicate#Funcs The default set of filters is derived from operator-sdk's ansible predicates https://github.com/operator-framework/operator-sdk/blob/50c6ac03746ff4edf582feb9a71d2a7ea6ae6c40/internal/ansible/controller/controller.go#L105 """ # Standard # First Party # Local log = alog.use_channel("PWMFLT") ## Default Types class Filter(ABC): """Generic Filter Interface for subclassing. Every subclass should implement a `test` function which returns true when a resource should be reconciled. Subclasses can optionally implement a `update` method if the filter requires storing some stateful information like ResourceVersion or Metadata. NOTE: A unique Filter instance is created for each resource """ def __init__(self, resource: ManagedObject): # noqa: B027 """Initializer can be used to detect configuration or create instance variables. Even though a resource is provided it should not set state until update is called Args: resource: ManagedObject This resource can be used by subclass to gather generic information. """ ## Abstract Interface ###################################################### # # These functions must be implemented by child classes ## @abstractmethod def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]: """Test whether the resource&event passes the filter. Returns true if the filter should be reconciled and return false if it should not be. A filter can optionally return None to ignore an event Args: resource: ManagedObject The current resource being checked event: KubeEventType The event type that triggered this filter Returns: result: Optional[bool] The result of the test. """ ## Base Class Interface #################################################### # # These methods MAY be implemented by children, but contain default # implementations that are appropriate for simple cases. # ## def update(self, resource: ManagedObject): # noqa: B027 """Update the instances current state. 
Args: resource: ManagedObject The current state of the resource """ def update_and_test(self, resource: ManagedObject, event: KubeEventType) -> bool: """First test a resource/event against a filter then update the current state Args: resource: ManagedObject The resource being filtered event: KubeEventType The event to be filtered Returns: test_result: bool The test result """ result = self.test(resource, event) if result is not None and not result: log.debug3( "Failed filter: %s with return val %s", self, result, extra={"resource": resource}, ) self.update(resource) return result ## Generic Resource filters class CreationDeletionFilter(Filter): """Filter to ensure reconciliation on creation and deletion events""" def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if event is ADDED or DELETED""" # Ignore non Added/Deleted Events if event not in [KubeEventType.ADDED, KubeEventType.DELETED]: return return True class GenerationFilter(Filter): """Filter for reconciling on generation changes for resources that support it""" def __init__(self, resource: ManagedObject): """Set generation instance variable""" super().__init__(resource) self.generation = None def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if resource generation is different than before""" # Only update&test resources with a generation if not self.generation: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Test if new generation is different return self.generation != resource.metadata.get("generation") def update(self, resource: ManagedObject): """Update the currently observed generation""" self.generation = resource.metadata.get("generation") class NoGenerationFilter(Filter): """Filter for reconciling changes to spec on resources that don't support the generation field like pods. 
It does this by hashing the objects excluding status and metadata""" def __init__(self, resource: ManagedObject): """Check if resource supports generation and initialize the hash dict""" self.supports_generation = resource.metadata.get("generation") is not None self.resource_hashes = {} super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return True if a resources current hash differs from the current""" # Don't test resources that support generation or if we don't have hashes yet if self.supports_generation or not self.resource_hashes: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Check each stored resource hash to see if its # changed for key, obj_has in self.resource_hashes.items(): if obj_has != obj_to_hash(resource.get(key)): log.debug2("Detected change in %s", key) return True return False def update(self, resource: ManagedObject): """Update the observed spec hashes""" if self.supports_generation: return # Get the default hashes for all object keys except metadata # and status for key, obj in resource.definition.items(): if key in ["metadata", "status", "kind", "apiVersion"]: continue self.resource_hashes[key] = obj_to_hash(obj) class ResourceVersionFilter(Filter): """Filter for duplicate resource versions which happens when restarting a watch connection""" def __init__(self, resource: ManagedObject): """Initialize the resource version list""" # Use a dequeue instead of a list/set to set a bound on the number # of tracked versions
self.resource_versions = deque([], maxlen=RESOURCE_VERSION_KEEP_COUNT)
8
2023-11-15 16:43:29+00:00
16k
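For context on this record's gold next line (the bounded deque assigned in ResourceVersionFilter.__init__), the following is a minimal, self-contained sketch of how a maxlen-bounded deque can drop duplicate resource versions. It is an illustration only, not oper8's actual implementation: the ResourceVersionTracker class and is_new method are hypothetical names, while RESOURCE_VERSION_KEEP_COUNT mirrors the constant listed in the record's context.

from collections import deque

RESOURCE_VERSION_KEEP_COUNT = 20  # mirrors the constant shown in the record's context


class ResourceVersionTracker:
    """Illustrative only: remember the last N resource versions and flag repeats."""

    def __init__(self, keep_count=RESOURCE_VERSION_KEEP_COUNT):
        # maxlen bounds memory: once keep_count versions are stored, appending
        # a new one silently evicts the oldest entry.
        self.resource_versions = deque([], maxlen=keep_count)

    def is_new(self, resource_version):
        """Return True the first time a resourceVersion is seen recently."""
        if resource_version in self.resource_versions:
            return False
        self.resource_versions.append(resource_version)
        return True


if __name__ == "__main__":
    tracker = ResourceVersionTracker(keep_count=3)
    print([tracker.is_new(v) for v in ["1", "2", "2", "3", "4", "1"]])
    # [True, True, False, True, True, True] -- the final "1" reads as new because maxlen=3 evicted it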
Jisencc/yolov5_dual_weighting
segment/predict.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine or triton # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {\n int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n core = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if ov_model.get_parameters()[0].get_layout().empty:\n ov_model.get_parameters()[0].set_layout(Layout('NCHW'))\n batch_dim = get_batch(ov_model)\n if batch_dim.is_static:\n batch_size = 
batch_dim.get_length()\n ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 
'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, 'r') as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith('tensorflow')\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.ov_compiled_model(im).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape 
== s, f\"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.BILINEAR)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "IMG_FORMATS", "path": "utils/dataloaders.py", "snippet": "IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/dataloaders.py", "snippet": "VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes" }, { "identifier": "LoadImages", "path": "utils/dataloaders.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line\n path = Path(path).read_text().rsplit()\n files = []\n for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:\n p = str(Path(p).resolve())\n if '*' in p:\n files.extend(sorted(glob.glob(p, recursive=True))) # glob\n elif os.path.isdir(p):\n files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir\n elif os.path.isfile(p):\n files.append(p) # files\n else:\n raise FileNotFoundError(f'{p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n self.transforms = transforms # optional\n self.vid_stride = vid_stride # video frame-rate stride\n if any(videos):\n self._new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n for _ in range(self.vid_stride):\n self.cap.grab()\n ret_val, im0 = self.cap.retrieve()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n path = self.files[self.count]\n self._new_video(path)\n ret_val, im0 = self.cap.read()\n\n self.frame += 1\n # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n im0 = cv2.imread(path) # BGR\n assert im0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n\n return path, im, im0, self.cap, s\n\n def _new_video(self, path):\n # Create a new video capture object\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)\n self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees\n # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493\n\n def _cv2_rotate(self, im):\n # Rotate a cv2 video manually\n if self.orientation == 0:\n return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)\n elif self.orientation == 180:\n return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)\n elif self.orientation == 90:\n return cv2.rotate(im, cv2.ROTATE_180)\n return im\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadScreenshots", "path": "utils/dataloaders.py", "snippet": "class LoadScreenshots:\n # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source \"screen 0 100 100 512 256\"`\n def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):\n # source = [screen_number left top width height] (pixels)\n check_requirements('mss')\n import mss\n\n source, *params = source.split()\n self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0\n if len(params) == 1:\n self.screen = int(params[0])\n elif len(params) == 4:\n left, top, width, height = (int(x) for x in params)\n elif len(params) == 5:\n self.screen, left, top, width, height = (int(x) for x in params)\n self.img_size = img_size\n self.stride = stride\n self.transforms = transforms\n self.auto = auto\n self.mode = 'stream'\n self.frame = 0\n self.sct = mss.mss()\n\n # Parse monitor shape\n monitor = self.sct.monitors[self.screen]\n self.top = monitor['top'] if top is None else (monitor['top'] + top)\n self.left = monitor['left'] if left is None else (monitor['left'] + left)\n self.width = width or monitor['width']\n self.height = height or monitor['height']\n self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # mss screen capture: get raw pixels from the screen as np array\n im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR\n s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n self.frame += 1\n return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s" }, { "identifier": "LoadStreams", "path": "utils/dataloaders.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n torch.backends.cudnn.benchmark = True # faster for fixed-size inference\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n self.vid_stride = vid_stride # video frame-rate stride\n sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]\n n = len(sources)\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video\n # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'\n check_requirements(('pafy', 'youtube_dl==2020.12.2'))\n import pafy\n s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n if s == 0:\n assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'\n assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. 
Rerun command in a local environment.'\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)')\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n self.auto = auto and self.rect\n self.transforms = transforms # optional\n if not self.rect:\n LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f = 0, self.frames[i] # frame number, frame array\n while cap.isOpened() and n < f:\n n += 1\n cap.grab() # .read() = .grab() followed by .retrieve()\n if n % self.vid_stride == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(0.0) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n im0 = self.imgs.copy()\n if self.transforms:\n im = np.stack([self.transforms(x) for x in im0]) # transforms\n else:\n im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize\n im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n im = np.ascontiguousarray(im) # contiguous\n\n return self.sources, im, im0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = 'yolov5'\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_jupyter():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, test=False):\ndef 
set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.8.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(filename, flags=cv2.IMREAD_COLOR):\ndef imwrite(filename, img):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "masks2segments", "path": "utils/segment/general.py", "snippet": "def masks2segments(masks, 
strategy='largest'):\n # Convert masks(n,160,160) into segments(n,xy)\n segments = []\n for x in masks.int().cpu().numpy().astype('uint8'):\n c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n if c:\n if strategy == 'concat': # concatenate all segments\n c = np.concatenate([x.reshape(-1, 2) for x in c])\n elif strategy == 'largest': # select largest segment\n c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)\n else:\n c = np.zeros((0, 2)) # no segments found\n segments.append(c.astype('float32'))\n return segments" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", "path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n protos: [mask_dim, mask_h, mask_w]\n masks_in: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape: input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / shape[0], mw / shape[1]) # gain = old / new\n pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
import argparse import os import platform import sys import torch from pathlib import Path from ultralytics.utils.plotting import Annotator, colors, save_one_box from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer) from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode
11,996
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/predict-seg', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True)
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/predict-seg', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True)
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
5
2023-11-12 13:28:26+00:00
16k
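For context, the run() entry point captured in the cropped_code and all_code fields of this record can also be invoked in-process rather than through the CLI shown in its docstring. The sketch below is a minimal, illustrative example only: the sys.path manipulation, the assumed local clone location, and the weights/source paths are placeholders, not values taken from this record.

# Minimal sketch: calling the YOLOv5 segmentation entry point shown above in-process.
# Assumes a local clone of the YOLOv5 repository whose root is added to sys.path;
# the checkpoint and image paths below are placeholders.
import sys
from pathlib import Path

sys.path.append(str(Path("yolov5").resolve()))  # assumed clone location
from segment.predict import run                 # the module reproduced in this record

run(
    weights=Path("yolov5s-seg.pt"),   # segmentation checkpoint (placeholder)
    source=Path("data/images"),       # directory of images to segment (placeholder)
    imgsz=(640, 640),                 # inference size (height, width), matching the default above
    conf_thres=0.25,                  # confidence threshold
    save_txt=True,                    # also write per-detection *.txt results
)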
RAIVNLab/MatFormer-OLMo
olmo/train.py
[ { "identifier": "PathOrStr", "path": "olmo/aliases.py", "snippet": "" }, { "identifier": "CheckpointType", "path": "olmo/config.py", "snippet": "class CheckpointType(StrEnum):\n sharded = \"sharded\"\n unsharded = \"unsharded\"" }, { "identifier": "SpeedMonitorConfig", "path": "olmo/config.py", "snippet": "class SpeedMonitorConfig(BaseConfig):\n window_size: int = 100\n gpu_flops_available: Optional[Union[float, int]] = None" }, { "identifier": "TrainConfig", "path": "olmo/config.py", "snippet": "class TrainConfig(BaseConfig):\n \"\"\"\n OLMo training configuration.\n \"\"\"\n\n run_name: Optional[str] = None\n \"\"\"\n The name of the run.\n \"\"\"\n\n seed: int = 6198\n \"\"\"\n Used to seed all initial RNG states.\n \"\"\"\n\n dry_run: bool = False\n \"\"\"\n If ``True``, don't actually train.\n \"\"\"\n\n model: ModelConfig = field(default_factory=ModelConfig)\n \"\"\"\n OLMo Model configuration.\n \"\"\"\n\n optimizer: OptimizerConfig = field(default_factory=OptimizerConfig)\n \"\"\"\n Optimizer configuration.\n \"\"\"\n\n scheduler: SchedulerConfig = field(default_factory=SchedulerConfig)\n \"\"\"\n Learning rate scheduler configuration.\n \"\"\"\n\n restore_base_learning_rate: bool = True\n \"\"\"\n Set to ``False`` if you want to restart with the base learning rate from the config, not the checkpoint.\n \"\"\"\n\n data: DataConfig = field(default_factory=DataConfig)\n \"\"\"\n Training data configuration.\n \"\"\"\n\n restore_dataloader: bool = True\n \"\"\"\n When restarting, restore the data loader to where it left off.\n If you restarting in order to train on a different dataset, set this to ``False``.\n \"\"\"\n\n fast_forward_batches: Optional[int] = None\n \"\"\"\n When restarting, use this to fast-forward the dataloader beyond the last checkpoint.\n This can be useful when restarting due to a loss spike in order to skip the data that\n corresponded to the spike.\n \"\"\"\n\n evaluators: List[EvaluatorConfig] = field(default_factory=list)\n \"\"\"\n Evaluation configurations.\n \"\"\"\n\n eval_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to run evaluations.\n \"\"\"\n\n tokenizer: TokenizerConfig = field(default_factory=TokenizerConfig)\n \"\"\"\n Tokenizer configuration.\n \"\"\"\n\n save_folder: str = \"./\"\n \"\"\"\n The directory to save checkpoints to.\n \"\"\"\n\n remote_save_folder: Optional[str] = None\n \"\"\"\n A folder in a cloud bucket to upload saved checkpoints to.\n \"\"\"\n\n save_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to save training state checkpoints that can be used for restarts.\n \"\"\"\n\n save_interval_unsharded: Optional[int] = None\n \"\"\"\n How often (if at all) to save the unsharded state to a single file.\n For large models it can be costly to save these, so it usually makes sense to save\n these less often than regular (sharded) training checkpoints.\n \"\"\"\n\n matformer_factor: int = 1\n\n save_num_checkpoints_to_keep: int = -1\n \"\"\"\n How many checkpoints to keep.\n \"\"\"\n\n save_num_unsharded_checkpoints_to_keep: int = -1\n \"\"\"\n How many unsharded checkpoints to keep.\n \"\"\"\n\n save_overwrite: bool = False\n \"\"\"\n If ``True``, overwrite any conflicting checkpoint files.\n \"\"\"\n\n force_save_unsharded: bool = False\n \"\"\"\n Save an unsharded checkpoint before training (even during a dry run).\n Use this option with `--load-path={PATH}` and `--dry_run` to convert a sharded\n checkpoint into an unsharded checkpoint.\n \"\"\"\n\n load_path: Optional[str] = None\n \"\"\"\n The 
path to a (sharded) training checkpoint to restore/resume from.\n \"\"\"\n\n max_duration: int = 10000\n \"\"\"\n Maximum number of batches to train for.\n \"\"\"\n\n global_train_batch_size: int = 512\n \"\"\"\n The effective global batch size.\n \"\"\"\n\n device_train_batch_size: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``global_train_batch_size // world_size``.\n \"\"\"\n\n device_train_microbatch_size: int = 16\n \"\"\"\n The number of instances passed to the model in a single forward-backward pass. You should set\n this as large as you can based on available GPU memory.\n \"\"\"\n\n device_eval_batch_size: int = 16\n \"\"\"\n The number of evaluation instances passed to the model in a single forward pass on each device.\n \"\"\"\n\n eval_subset_num_batches: int = -1\n \"\"\"\n The number of batches to use for downstream evaluation from each dataset.\n \"\"\"\n\n eval_on_load: bool = False\n \"\"\"\n When resuming from a checkpoint, run the evaluation loop right away.\n \"\"\"\n\n device_train_grad_accum: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``device_train_batch_size // device_train_microbatch_size``.\n \"\"\"\n\n max_grad_norm: Optional[float] = None\n \"\"\"\n Clip gradients to this value if set.\n \"\"\"\n\n precision: Optional[str] = None\n \"\"\"\n Precision to train with (e.g. \"amp_bf16\", \"amp_fp16\", or \"fp32\").\n \"\"\"\n\n wandb: Optional[WandbConfig] = None\n \"\"\"\n Weights & Biases configuration.\n \"\"\"\n\n speed_monitor: SpeedMonitorConfig = field(default_factory=SpeedMonitorConfig)\n \"\"\"\n Speed monitor configuration.\n \"\"\"\n\n console_log_interval: int = 1\n \"\"\"\n How often to log to the console.\n \"\"\"\n\n compile: Optional[CompilerConfig] = None\n \"\"\"\n Settings for compiling the model with ``torch.compile()``.\n \"\"\"\n\n activation_checkpointing: bool = False\n \"\"\"\n Use activation checkpointing on transformer blocks.\n \"\"\"\n\n fsdp: FSDPConfig = field(default_factory=FSDPConfig)\n \"\"\"\n Fully sharded data parallel settings.\n \"\"\"\n\n softmax_auxiliary_loss: bool = False\n \"\"\"\n If ``True``, we add the auxiliary loss function from PaLM that encourages the softmax\n normalizing term to be close to 0.\n \"\"\"\n\n time_limit: Optional[float] = 60 * 60 * 119.5\n \"\"\"\n The maximum amount of time to train for before saving a checkpoint and ending early.\n On LUMI we have 48 hours max per job, so we default to just under 48 hours to give us time\n to write out a final checkpoint.\n \"\"\"\n\n early_stopping_factor: Optional[float] = None\n\n save_data_indices: bool = True\n \"\"\"\n Save training data indices from each batch for each worker.\n \"\"\"\n\n @property\n def autocast_precision(self) -> torch.dtype:\n if self.precision == \"amp_bf16\":\n return torch.bfloat16\n elif self.precision == \"amp_fp16\":\n return torch.float16\n elif self.precision == \"fp32\":\n return torch.float32\n else:\n raise ValueError(f\"Unexpected precision type '{self.precision}'\")" }, { "identifier": "IterableDataset", "path": "olmo/data/iterable_dataset.py", "snippet": "class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]):\n \"\"\"\n Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence\n as an IterableDataset that can be deterministically restarted at any point by setting `start_index`,\n which should be a multiple of your global batch size.\n Similarly `max_examples`, 
if set, should be a multiple of global batch size.\n \"\"\"\n\n def __init__(\n self,\n dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]],\n *,\n seed: int = 0,\n start_index: int = 0,\n max_examples: Optional[int] = None,\n shuffle: bool = True,\n drop_last: bool = False,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n work_dir: Optional[PathOrStr] = None,\n ):\n self.dataset = dataset\n self.seed = seed\n self.start_index = start_index\n self.max_examples = max_examples\n self.shuffle = shuffle\n self.drop_last = drop_last\n self.rank = rank if rank is not None else get_global_rank()\n self.world_size = world_size if world_size is not None else get_world_size()\n # If the dataset length is evenly divisible by # of replicas, then there\n # is no need to drop any data, since the dataset will be split equally.\n if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type]\n # Split to nearest available length that is evenly divisible by world size.\n # This is to ensure each rank receives the same amount of data.\n num_samples = math.ceil(\n (len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type]\n )\n else:\n num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type]\n self.total_size = num_samples * self.world_size\n self.global_indices_file: Optional[Path] = None\n if work_dir is not None:\n self.global_indices_file = Path(work_dir) / \"global_indices.npy\"\n if self.rank == 0:\n log.info(\"Saving global data order indices...\")\n self.global_indices_file.parent.mkdir(parents=True, exist_ok=True)\n global_indices = self._build_global_indices()\n global_indices_mmap = np.memmap(\n self.global_indices_file, dtype=np.uint64, mode=\"w+\", shape=(len(global_indices),)\n )\n global_indices_mmap[:] = global_indices\n global_indices_mmap.flush()\n del global_indices_mmap\n log.info(\"Global data order indices saved to '%s'\", self.global_indices_file)\n barrier()\n\n def _build_global_indices(self) -> List[int]:\n if self.shuffle:\n # Deterministically shuffle based on epoch and seed\n # Torch built-in randomness is not very random, so we use numpy.\n rng = np.random.Generator(np.random.PCG64(seed=self.seed))\n indices = np.arange(len(self.dataset))\n rng.shuffle(indices)\n indices = list(indices)\n else:\n indices = list(range(len(self.dataset))) # type: ignore[arg-type]\n\n if not self.drop_last:\n # Add extra samples to make it evenly divisible\n padding_size = self.total_size - len(indices)\n if padding_size <= len(indices):\n indices += indices[:padding_size]\n else:\n indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]\n else:\n # Remove tail of data to make it evenly divisible.\n indices = indices[: self.total_size]\n assert len(indices) == self.total_size\n return indices\n\n def get_global_indices(self) -> Sequence[int]:\n if self.global_indices_file is not None:\n return np.memmap(self.global_indices_file, mode=\"r\", dtype=np.uint64) # type: ignore\n else:\n return self._build_global_indices()\n\n def __iter__(self) -> Iterator[Dict[str, Any]]:\n indices = self.get_global_indices()\n\n # Truncate to max_examples.\n if self.max_examples is not None:\n assert self.max_examples % self.world_size == 0\n indices = indices[: self.max_examples]\n\n # Start at the specified index.\n if self.start_index > 0:\n assert self.start_index % self.world_size == 0\n indices = indices[self.start_index :]\n\n # Slice indices by rank to avoid 
duplicates.\n indices = indices[self.rank : self.total_size : self.world_size]\n\n # Lastly, slice the indices by data loader worker rank to avoid duplicates.\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n indices = indices[worker_info.id :: worker_info.num_workers]\n\n # Convert to a list at this point so we don't have to rely on memory-mapping.\n if isinstance(indices, np.memmap):\n indices_list = indices.tolist() # type: ignore\n else:\n indices_list = indices\n del indices\n\n return (self._get_dataset_item(int(idx)) for idx in indices_list)\n\n def _get_dataset_item(self, idx: int) -> Dict[str, Any]:\n item = self.dataset[idx]\n if isinstance(item, dict):\n return dict(**item, index=idx)\n else:\n return {\"input_ids\": item, \"index\": idx}" }, { "identifier": "Evaluator", "path": "olmo/eval/evaluator.py", "snippet": "class Evaluator:\n label: str\n type: EvaluatorType\n eval_loader: DataLoader\n eval_metric: Union[Metric, Dict[str, Metric]]\n subset_num_batches: Optional[int] = None\n\n def reset_metrics(self) -> None:\n if isinstance(self.eval_metric, Metric):\n self.eval_metric.reset()\n else:\n for metric in self.eval_metric.values():\n metric.reset()\n\n def compute_metrics(self) -> Dict[str, float]:\n if self.type == EvaluatorType.downstream:\n assert isinstance(self.eval_metric, ICLMetric)\n return {\n f\"eval/downstream/{self.label}_{self.eval_metric.metric_type}\": self.eval_metric.compute().item(),\n }\n elif self.type == EvaluatorType.lm:\n # Metric(s) = cross entropy loss\n metrics: Dict[str, Metric]\n if isinstance(self.eval_metric, Metric):\n metrics = {self.label: self.eval_metric}\n else:\n metrics = self.eval_metric\n out = {}\n for label in sorted(metrics.keys()):\n metric = metrics[label]\n assert isinstance(metric, MeanMetric)\n if metric.weight.item() == 0.0: # type: ignore\n # In this case we probably haven't called '.update()' on this metric yet,\n # so we do so here with dummy values. 
Since we pass 0.0 in for weight this won't\n # affect the final value.\n # This can happen when the evaluator contains multiple tasks/datasets and we didn't\n # get to this one within the current evaluation loop.\n metric.update(0.0, 0.0)\n loss = metric.compute()\n if loss.isnan().item():\n # This can happen when the evaluator contains multiple tasks/datasets and we didn't\n # get to this one within the current evaluation loop.\n continue\n else:\n out[f\"eval/{label}/CrossEntropyLoss\"] = loss.item()\n out[f\"eval/{label}/Perplexity\"] = (2**(loss)).item()\n return out\n else:\n raise ValueError(f\"Unexpected evaluator type '{self.type}'\")\n\n def update_metrics(\n self,\n batch: Dict[str, Any],\n ce_loss: torch.Tensor,\n logits: torch.Tensor,\n matformer_factor = 1\n ) -> None:\n if self.type == EvaluatorType.downstream:\n assert isinstance(self.eval_metric, ICLMetric)\n self.eval_metric.update(batch, logits) # type: ignore\n elif self.type == EvaluatorType.lm:\n # Metric(s) = cross entropy loss\n for metadata, instance_loss in zip(batch[\"metadata\"], ce_loss):\n if isinstance(self.eval_metric, dict):\n metric = self.eval_metric[metadata[\"label\"]]\n else:\n metric = self.eval_metric\n metric.update(instance_loss)\n else:\n raise ValueError(f\"Unexpected evaluator type '{self.type}'\")" }, { "identifier": "OlmoConfigurationError", "path": "olmo/exceptions.py", "snippet": "class OlmoConfigurationError(OlmoError):\n \"\"\"\n An error with a configuration file.\n \"\"\"" }, { "identifier": "Olmo", "path": "olmo/model.py", "snippet": "class Olmo(nn.Module):\n def __init__(self, config: ModelConfig, init_params: bool = True):\n super().__init__()\n self.config = config\n\n # Validate config.\n if self.config.alibi and self.config.flash_attention:\n raise OlmoConfigurationError(\"ALiBi is currently not supported with FlashAttention\")\n\n if self.config.alibi and self.config.rope:\n raise OlmoConfigurationError(\"ALiBi and RoPE are mutually exclusive\")\n\n if self.config.embedding_size is not None and self.config.embedding_size != self.config.vocab_size:\n if self.config.embedding_size < self.config.vocab_size:\n raise OlmoConfigurationError(\"embedding size should be at least as big as vocab size\")\n elif self.config.embedding_size % 128 != 0:\n import warnings\n\n warnings.warn(\n \"Embedding size is not a multiple of 128! 
This could hurt throughput performance.\", UserWarning\n )\n\n torch.backends.cuda.enable_flash_sdp(self.config.flash_attention)\n torch.backends.cuda.enable_mem_efficient_sdp(False) # this is super slow so make sure torch won't use it\n\n self.transformer = nn.ModuleDict(\n dict(\n wte=nn.Embedding(\n config.embedding_size or config.vocab_size, config.d_model, device=config.init_device\n ),\n emb_drop=nn.Dropout(config.embedding_dropout),\n blocks=nn.ModuleList([OlmoBlock.build(config) for _ in range(config.n_layers)]),\n ln_f=LayerNorm.build(config),\n )\n )\n if not (self.config.alibi or self.config.rope):\n self.transformer.update(\n {\"wpe\": nn.Embedding(config.max_sequence_length, config.d_model, device=config.init_device)}\n )\n if init_params and self.config.init_device != \"meta\":\n self.apply(self.param_init_fn)\n self.__num_fwd_flops: Optional[int] = None\n\n # Attention bias cache.\n # We could cache these as buffers, but we've run into various issues doing that with FSDP.\n # In general it appears the way FSDP handles buffers is not well-defined.\n # It doesn't shard them but apparently it does synchronize them across processes, which we want to avoid\n # since (A) it isn't necessary, and (B) we have `-inf` in these biases which might get turned into\n # NaNs when they're synchronized due to casting or some other issue.\n self.__bias_cache: Dict[str, Optional[torch.FloatTensor]] = {\n \"causal_attention_bias\": None,\n \"alibi_attention_bias\": None,\n }\n if self.config.alibi:\n # Warm up cache.\n self.causal_attention_bias\n self.alibi_attention_bias\n\n @property\n def device(self) -> torch.device:\n device: torch.device = self.transformer.wte.weight.device # type: ignore\n if device.type == \"meta\":\n if self.config.init_device is not None and self.config.init_device != \"meta\":\n return torch.device(self.config.init_device)\n else:\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n return device\n\n @property\n def causal_attention_bias(self) -> torch.FloatTensor:\n causal_bias = self.__bias_cache[\"causal_attention_bias\"]\n if causal_bias is None:\n causal_bias = causal_attention_bias(self.config, self.device)\n self.__bias_cache[\"causal_attention_bias\"] = causal_bias\n elif causal_bias.device != self.device: # in case model was moved to different device\n causal_bias = causal_bias.to(device=self.device)\n self.__bias_cache[\"causal_attention_bias\"] = causal_bias # type: ignore\n return causal_bias # type: ignore\n\n @property\n def alibi_attention_bias(self) -> torch.FloatTensor:\n alibi_bias = self.__bias_cache[\"alibi_attention_bias\"]\n if alibi_bias is None:\n alibi_bias = alibi_attention_bias(self.config, self.device)\n self.__bias_cache[\"alibi_attention_bias\"] = alibi_bias\n elif alibi_bias.device != self.device: # in case model was moved to different device\n alibi_bias = alibi_bias.to(device=self.device)\n self.__bias_cache[\"alibi_attention_bias\"] = alibi_bias # type: ignore\n return alibi_bias # type: ignore\n\n def forward(\n self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n attention_bias: Optional[torch.Tensor] = None,\n past_key_values: Optional[Sequence[Tuple[torch.Tensor, torch.Tensor]]] = None,\n use_cache: bool = False,\n last_logits_only: bool = False,\n ) -> OlmoOutput:\n \"\"\"\n :param input_ids: A tensor of shape `(batch_size, seq_len)`.\n :param attention_mask: A tensor of shape `(batch_size, seq_len)` that indicates\n which input IDs are masked. 
A `1` value in the mask means that\n the corresponding input ID should *not* be ignored. A `0` means\n that the corresponding input ID is masked.\n\n This has the same meaning as the `attention_mask` in HuggingFace's `transformers`\n library.\n :param attention_bias: A tensor of shape `(batch_size, 1, seq_len, seq_len)`,\n `(1, 1, seq_len, seq_len)`, or `(seq_len, seq_len)`. This is used\n to introduce causal or other biases.\n\n If the tensor is a bool or byte tensor, a `True` or `1` at `attention_bias[:, :, i, j]`\n indicates that the i-th element in the sequence is allowed to attend to the j-th\n element in the sequence.\n\n If the tensor is a float tensor, it will just be added to the attention\n scores before the softmax.\n\n The default is causal, which corresponds to a lower-diagonal byte matrix of ones.\n :param past_key_values: Pre-computed keys and values for each attention block.\n Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n :param use_cache: If `True`, return key and value tensors for each block.\n :param last_logits_only: If `True`, only compute the logits for the last token of each sequence.\n This can speed up decoding when you only care about the next token.\n \"\"\"\n if past_key_values:\n assert len(past_key_values) == self.config.n_layers\n\n batch_size, seq_len = input_ids.size()\n assert seq_len <= self.config.max_sequence_length, (\n f\"Cannot forward input with seq_len={seq_len}, \"\n f\"this model only supports seq_len<={self.config.max_sequence_length}\"\n )\n\n # Get embeddings of input.\n # shape: (batch_size, seq_len, d_model)\n x = self.transformer.wte(input_ids) # type: ignore\n\n if not (self.config.alibi or self.config.rope):\n # Get positional embeddings.\n if past_key_values is None:\n past_length = 0\n else:\n past_length = past_key_values[0][0].size(-2)\n # shape: (1, seq_len)\n pos = torch.arange(\n past_length, past_length + seq_len, dtype=torch.long, device=input_ids.device\n ).unsqueeze(0)\n # shape: (1, seq_len, d_model)\n pos_emb = self.transformer.wpe(pos) # type: ignore\n x = pos_emb + x\n\n # Add input + positional embeddings and apply dropout.\n # shape: (batch_size, seq_len, d_model)\n x = self.transformer.emb_drop(x) # type: ignore\n\n # Transform the attention mask into what the blocks expect.\n if attention_mask is not None:\n # shape: (batch_size, 1, 1, seq_len)\n attention_mask = attention_mask.to(dtype=x.dtype).view(batch_size, -1)[:, None, None, :]\n attention_mask = (1.0 - attention_mask) * torch.finfo(attention_mask.dtype).min\n attention_mask.masked_fill_(attention_mask == 1.0, float(\"-inf\"))\n\n # Merge attention mask with attention bias.\n if (\n attention_bias is not None\n or attention_mask is not None\n or self.config.alibi\n # NOTE (epwalsh): we need to initialize the attn bias in order for attn to work properly\n # with key+value cache. 
Otherwise `F.scaled_dot_product_attention()` doesn't seem to compute\n # scores correctly.\n or past_key_values is not None\n ):\n if attention_bias is None and self.config.alibi:\n attention_bias = self.causal_attention_bias + self.alibi_attention_bias\n elif attention_bias is None:\n attention_bias = self.causal_attention_bias\n elif attention_bias.dtype in (torch.int8, torch.bool):\n attention_bias = attention_bias.to(dtype=x.dtype)\n attention_bias.masked_fill_(attention_bias == 0.0, float(\"-inf\"))\n\n # Transform to the right shape and data type.\n mask_len = seq_len\n if attention_mask is not None:\n mask_len = attention_mask.shape[-1]\n elif past_key_values is not None:\n mask_len = past_key_values[0][0].shape[-2] + input_ids.shape[-1]\n attention_bias = attention_bias[:, :, :mask_len, :mask_len].to(x.dtype)\n\n # Add in the masking bias.\n if attention_mask is not None:\n attention_bias = attention_bias + attention_mask\n\n attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None\n\n # Apply blocks one-by-one.\n for block, layer_past in zip(\n self.transformer.blocks, # type: ignore\n past_key_values or [None] * self.config.n_layers, # type: ignore\n ):\n # shape: (batch_size, seq_len, d_model)\n x, cache = block(x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache)\n if attn_key_values is not None:\n assert cache is not None\n attn_key_values.append(cache)\n\n if last_logits_only:\n # shape: (batch_size, 1, d_model)\n x = x[:, -1, :].unsqueeze(1)\n\n # Apply final layer norm.\n # shape: (batch_size, seq_len or 1, d_model)\n x = self.transformer.ln_f(x) # type: ignore\n\n # Get logits.\n # shape: (batch_size, seq_len or 1, vocab_size)\n logits = F.linear(x, self.transformer.wte.weight, None) # type: ignore\n\n return OlmoOutput(logits=logits, attn_key_values=attn_key_values) # type: ignore[arg-type]\n\n def fsdp_wrap_fn(self, module, recurse: bool = True, nonwrapped_numel: int = 0):\n del recurse, nonwrapped_numel\n return isinstance(module, OlmoBlock)\n\n def activation_checkpointing_fn(self, module):\n return isinstance(module, OlmoBlock)\n\n def reset_parameters(self):\n self.apply(self.param_init_fn)\n\n def param_init_fn(self, module):\n from functools import partial\n\n init_fn = partial(nn.init.normal_, mean=0.0, std=self.config.init_std)\n\n def fused_init_fn(module):\n # Parameter initialization is often based on the parameters shape.\n # If a layer is fused, initialization should be based on the shapes\n # of the original tensor instead of the shape of the fused tensor.\n # Layers which are fused should have the _fused attribute defined.\n # The first element of _fused is the dimension along which the tensor is fused.\n # This is followed by an iterable of split indices.\n _fused = getattr(module, \"_fused\", None)\n if _fused is None:\n raise RuntimeError(\"Internal logic error\")\n\n dim, splits = _fused\n splits = (0, *splits, module.weight.size(dim))\n for s, e in zip(splits[:-1], splits[1:]):\n slice_indices = [slice(None)] * module.weight.ndim\n slice_indices[dim] = slice(s, e)\n init_fn(module.weight[slice_indices])\n\n # Linear\n if isinstance(module, nn.Linear):\n if hasattr(module, \"_fused\"):\n fused_init_fn(module)\n else:\n init_fn(module.weight)\n\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n\n if getattr(module, \"_is_residual\", False):\n with torch.no_grad():\n module.weight.div_(math.sqrt(2 * self.config.n_layers))\n\n if module.bias is not None:\n 
nn.init.zeros_(module.bias)\n\n # Embedding\n if isinstance(module, nn.Embedding):\n init_fn(module.weight)\n\n # LayerNorm\n if isinstance(module, (nn.LayerNorm, LayerNorm, RMSLayerNorm)):\n torch.nn.init.ones_(module.weight)\n torch.nn.init.zeros_(module.bias)\n\n def num_params(self, include_embedding: bool = True) -> int:\n \"\"\"\n Get the total number of parameters.\n \"\"\"\n params = (np for np in self.named_parameters())\n if not include_embedding:\n params = filter( # type: ignore\n lambda np: \".wte.\" not in np[0] and \".wpe.\" not in np[0],\n params,\n )\n return sum(p.numel() for _, p in params)\n\n @property\n def num_fwd_flops(self):\n if self.__num_fwd_flops:\n return self.__num_fwd_flops\n n_params = self.num_params()\n # the number of parameters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.config.max_sequence_length\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = (\n self.config.n_layers * 2 * 2 * (self.config.d_model * (self.config.max_sequence_length**2))\n )\n self.__num_fwd_flops = params_flops_per_seq + attn_flops_per_seq\n return self.__num_fwd_flops\n\n def generate(\n self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n attention_bias: Optional[torch.Tensor] = None,\n max_steps: int = 10,\n beam_size: int = 1,\n per_node_beam_size: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n min_steps: Optional[int] = None,\n final_sequence_scorer: Optional[FinalSequenceScorer] = None,\n constraints: Optional[List[Constraint]] = None,\n ) -> OlmoGenerateOutput:\n \"\"\"\n Generate token IDs using beam search.\n\n Note that by default ``beam_size`` is set to 1, which is greedy decoding.\n\n :param input_ids: A tensor of shape `(batch_size, seq_len)`.\n :param attention_mask: A optional tensor of shape `(batch_size, seq_len)`, the same\n as for the forward method.\n :param attention_bias: A tensor of shape\n `(batch_size, 1, seq_len + tokens_to_generate, seq_len + tokens_to_generate)`,\n the same as for the forward method except only one shape is excepted here.\n\n For an explanation of the other arguments, see the :class:`BeamSearch` class.\n \"\"\"\n beam_search = BeamSearch(\n self.config.eos_token_id,\n max_steps=max_steps,\n beam_size=beam_size,\n per_node_beam_size=per_node_beam_size,\n sampler=sampler,\n min_steps=min_steps,\n final_sequence_scorer=final_sequence_scorer,\n constraints=constraints,\n )\n\n # Validate inputs.\n batch_size, seq_len = input_ids.shape\n if attention_mask is not None:\n assert attention_mask.shape == (batch_size, seq_len)\n if attention_bias is not None:\n assert len(attention_bias.shape) == 4\n assert attention_bias.shape[:2] == (batch_size, 1)\n assert (\n seq_len + beam_search.max_steps\n <= attention_bias.shape[2]\n == attention_bias.shape[3]\n <= self.config.max_sequence_length\n )\n\n tokens_generated = 0\n\n def flatten_past_key_values(\n past_key_values: List[Tuple[torch.Tensor, torch.Tensor]]\n ) -> Dict[str, torch.Tensor]:\n out = {}\n for i, (key, value) in enumerate(past_key_values):\n out[f\"past_key_{i}\"] = key\n out[f\"past_value_{i}\"] = value\n return out\n\n def unflatten_past_key_values(\n past_key_values: Dict[str, torch.Tensor]\n ) -> List[Tuple[torch.Tensor, torch.Tensor]]:\n out = []\n for i in 
range(self.config.n_layers):\n past_key = past_key_values[f\"past_key_{i}\"]\n past_value = past_key_values[f\"past_value_{i}\"]\n out.append((past_key, past_value))\n return out\n\n def step(\n last_predictions: torch.Tensor, state: dict[str, torch.Tensor]\n ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:\n nonlocal tokens_generated\n\n attention_mask = state.get(\"attention_mask\")\n attention_bias = state.get(\"attention_bias\")\n\n if tokens_generated > 0:\n past_key_values = unflatten_past_key_values(state)\n input_ids = last_predictions.unsqueeze(1)\n if attention_mask is not None:\n group_size = input_ids.shape[0]\n attention_mask = torch.cat((attention_mask, attention_mask.new_ones((group_size, 1))), dim=-1)\n else:\n past_key_values = None\n input_ids = state[\"input_ids\"]\n\n tokens_generated += 1\n\n # Run forward pass of model to get logits, then normalize to get log probs.\n output = self(\n input_ids,\n attention_mask=attention_mask,\n attention_bias=attention_bias,\n past_key_values=past_key_values,\n use_cache=True,\n last_logits_only=True,\n )\n log_probs = F.log_softmax(output.logits[:, -1, :], dim=-1)\n\n # Create new state.\n state = flatten_past_key_values(output.attn_key_values)\n if attention_mask is not None:\n state[\"attention_mask\"] = attention_mask\n if attention_bias is not None:\n state[\"attention_bias\"] = attention_bias\n\n return log_probs, state\n\n initial_preds = input_ids.new_zeros((batch_size,)) # This is arbitrary, we won't use this.\n state: dict[str, torch.Tensor] = {\"input_ids\": input_ids}\n if attention_mask is not None:\n state[\"attention_mask\"] = attention_mask\n if attention_bias is not None:\n state[\"attention_bias\"] = attention_bias\n with torch.no_grad():\n token_ids, scores = beam_search.search(initial_preds, state, step)\n\n return OlmoGenerateOutput(\n token_ids=token_ids, # type: ignore[arg-type]\n scores=scores, # type: ignore[arg-type]\n )\n\n @classmethod\n def from_checkpoint(cls, checkpoint_dir: PathOrStr, device: str = \"cpu\") -> Olmo:\n \"\"\"\n Load an OLMo model from a checkpoint.\n \"\"\"\n from cached_path import cached_path\n\n # Load config.\n config_path = cached_path(os.path.join(checkpoint_dir, \"config.yaml\"))\n model_config = ModelConfig.load(config_path, key=\"model\", validate_paths=False)\n\n # Initialize model (always on CPU to start with so we don't run out of GPU memory).\n model_config.init_device = \"cpu\"\n model = Olmo(model_config)\n model.config.init_device = device\n\n # Load state dict directly to target device.\n state_dict_path = cached_path(os.path.join(checkpoint_dir, \"model.pt\"))\n state_dict = torch.load(state_dict_path, map_location=\"cpu\")\n model.load_state_dict(model._make_state_dict_compatible(state_dict))\n\n return model.to(torch.device(device)).eval()\n\n def _make_state_dict_compatible(self, state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n # For backwards compatibility prior to fixing https://github.com/allenai/LLM/issues/222\n if self.config.block_type == BlockType.sequential:\n for block_idx in range(self.config.n_layers):\n norm_w_key = f\"transformer.blocks.{block_idx}.norm.weight\"\n norm_b_key = f\"transformer.blocks.{block_idx}.norm.bias\"\n if norm_w_key in state_dict:\n norm_w = state_dict.pop(norm_w_key)\n state_dict[f\"transformer.blocks.{block_idx}.attn_norm.weight\"] = norm_w\n state_dict[f\"transformer.blocks.{block_idx}.ff_norm.weight\"] = norm_w.clone()\n if norm_b_key in state_dict:\n norm_b = state_dict.pop(norm_b_key)\n 
state_dict[f\"transformer.blocks.{block_idx}.attn_norm.bias\"] = norm_b\n state_dict[f\"transformer.blocks.{block_idx}.ff_norm.bias\"] = norm_b.clone()\n return state_dict" }, { "identifier": "MatformerManager", "path": "olmo/model.py", "snippet": "class MatformerManager:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.current_factor = 1\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance" }, { "identifier": "set_new_base_lr", "path": "olmo/optim.py", "snippet": "def set_new_base_lr(\n optim: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler.LRScheduler, new_base_lr: float\n):\n \"\"\"\n Set a new base learning rate in the optimizer and scheduler.\n \"\"\"\n # Hack scheduler state to start with the new base LR.\n if isinstance(scheduler, torch.optim.lr_scheduler.SequentialLR):\n # Update 'base_lr' for all sub-schedulers.\n for sched in scheduler._schedulers: # type: ignore\n sched.base_lrs = [new_base_lr] * len(sched.base_lrs)\n\n # Update '_last_lr' for current sub-scheduler.\n current_sched = scheduler._schedulers[bisect_right(scheduler._milestones, scheduler.last_epoch)] # type: ignore\n if hasattr(current_sched, \"_get_closed_form_lr\"):\n current_sched._last_lr = current_sched._get_closed_form_lr()\n elif isinstance(current_sched, torch.optim.lr_scheduler.LambdaLR):\n current_sched._last_lr = current_sched.get_lr() # type: ignore\n else:\n raise NotImplementedError\n scheduler._last_lr = current_sched.get_last_lr() # type: ignore\n else:\n raise NotImplementedError\n\n # Update LR in optimizer.\n for param_group, new_lr in zip(optim.param_groups, scheduler.get_last_lr()):\n param_group[\"lr\"] = new_lr\n param_group[\"initial_lr\"] = new_base_lr" }, { "identifier": "barrier", "path": "olmo/util.py", "snippet": "def barrier() -> None:\n if dist.is_available() and dist.is_initialized():\n dist.barrier()" }, { "identifier": "get_global_rank", "path": "olmo/util.py", "snippet": "def get_global_rank() -> int:\n return int(os.environ.get(\"RANK\") or dist.get_rank())" }, { "identifier": "get_world_size", "path": "olmo/util.py", "snippet": "def get_world_size() -> int:\n if dist.is_available() and dist.is_initialized():\n return dist.get_world_size()\n else:\n return 1" }, { "identifier": "move_to_device", "path": "olmo/util.py", "snippet": "def move_to_device(o: T, device: torch.device) -> T:\n if isinstance(o, torch.Tensor):\n return o.to(device) # type: ignore[return-value]\n elif isinstance(o, dict):\n return {k: move_to_device(v, device) for k, v in o.items()} # type: ignore[return-value]\n elif isinstance(o, list):\n return [move_to_device(x, device) for x in o] # type: ignore[return-value]\n elif isinstance(o, tuple):\n return tuple((move_to_device(x, device) for x in o)) # type: ignore[return-value]\n else:\n return o" }, { "identifier": "peak_gpu_memory", "path": "olmo/util.py", "snippet": "def peak_gpu_memory(reset: bool = False) -> Optional[float]:\n \"\"\"\n Get the peak GPU memory usage in MB across all ranks.\n Only rank 0 will get the final result.\n \"\"\"\n if not torch.cuda.is_available():\n return None\n\n device = torch.device(\"cuda\")\n peak_mb = torch.cuda.max_memory_allocated(device) / 1000000\n if dist.is_available() and dist.is_initialized():\n peak_mb_tensor = torch.tensor(peak_mb, device=device)\n dist.reduce(peak_mb_tensor, 0, dist.ReduceOp.MAX)\n peak_mb = 
peak_mb_tensor.item()\n\n if reset:\n # Reset peak stats.\n torch.cuda.reset_max_memory_allocated(device)\n\n return peak_mb" }, { "identifier": "resource_path", "path": "olmo/util.py", "snippet": "def resource_path(folder: PathOrStr, fname: str) -> PathOrStr:\n if is_url(folder):\n from cached_path import cached_path\n\n return cached_path(f\"{folder}/{fname}\")\n else:\n return Path(folder) / fname" }, { "identifier": "syncronize_flag", "path": "olmo/util.py", "snippet": "def syncronize_flag(flag: bool, device: torch.device) -> bool:\n if dist.is_available() and dist.is_initialized():\n flag_tensor = torch.tensor(flag, device=device)\n dist.broadcast(flag_tensor, 0)\n return flag_tensor.item() # type: ignore\n else:\n return flag" }, { "identifier": "upload", "path": "olmo/util.py", "snippet": "def upload(source: PathOrStr, target: str, save_overwrite: bool = False):\n \"\"\"Upload source file to a target location on GCS or S3.\"\"\"\n from urllib.parse import urlparse\n\n source = Path(source)\n assert source.is_file()\n parsed = urlparse(target)\n if parsed.scheme == \"gs\":\n _gcs_upload(source, parsed.netloc, parsed.path, save_overwrite=save_overwrite)\n elif parsed.scheme == \"s3\":\n _s3_upload(source, parsed.netloc, parsed.path, save_overwrite=save_overwrite)\n else:\n raise NotImplementedError(f\"Upload not implemented for '{parsed.scheme}' scheme\")" }, { "identifier": "wait_on", "path": "olmo/util.py", "snippet": "def wait_on(condition: Callable[[], bool], description: str, timeout: float = 10.0):\n \"\"\"Wait on the condition function to return True.\"\"\"\n start_time = time.monotonic()\n while not condition():\n time.sleep(0.5)\n if time.monotonic() - start_time > timeout:\n raise TimeoutError(f\"{description} timed out\")" } ]
import logging
import math
import random
import shutil
import time

import numpy as np
import torch
import torch.nn.functional as F
import wandb
from collections import deque
from dataclasses import dataclass, field
from itertools import islice
from pathlib import Path
from typing import Any, Deque, Dict, List, Optional, TextIO, Tuple

from packaging import version
from torch.distributed.fsdp import FullStateDictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import StateDictType
from torch.distributed.fsdp.api import (
    FullOptimStateDictConfig,
    ShardedOptimStateDictConfig,
    ShardedStateDictConfig,
)
from torch.utils.data import DataLoader
from torchmetrics import MeanMetric

from .aliases import PathOrStr
from .config import CheckpointType, SpeedMonitorConfig, TrainConfig
from .data import IterableDataset
from .eval import Evaluator
from .exceptions import OlmoConfigurationError
from .model import Olmo, MatformerManager
from .optim import set_new_base_lr
from .util import (
    barrier,
    get_global_rank,
    get_world_size,
    move_to_device,
    peak_gpu_memory,
    resource_path,
    syncronize_flag,
    upload,
    wait_on,
)
12,939
self.global_train_tokens_seen = state_dict.get( # newer addition "global_train_tokens_seen", self.global_data_step * self.cfg.global_train_batch_size * self.cfg.model.max_sequence_length, ) if not self.cfg.restore_dataloader: self.global_data_step = 0 self.global_train_examples_seen = 0 self.global_train_tokens_seen = 0 elif self.cfg.fast_forward_batches: self.global_data_step += self.cfg.fast_forward_batches # Technically we don't "see" these batches that we fast-forward through, but we use # this variable to update the position of the dataset so we need to include them here. self.global_train_examples_seen += self.cfg.fast_forward_batches * self.cfg.global_train_batch_size # NOTE: on the other hand we don't add anything to 'self.global_train_tokens_seen' here because # that variable is meant to track the actual number of tokens trained on. if self.global_data_step > 0: if self.global_data_step > self.global_step: log.info( f"Fast-forwarding data loader to step {self.global_step:,d}+{self.global_data_step-self.global_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) else: log.info( f"Fast-forwarding data loader to step {self.global_data_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) assert isinstance(self.train_loader.dataset, IterableDataset) self.train_loader.dataset.start_index = self.global_train_examples_seen if not self.cfg.restore_base_learning_rate: # Reset base learning rate to the value in the config, not the checkpoint. set_new_base_lr(self.optim, self.scheduler, self.cfg.optimizer.learning_rate) # RNG states. if "rng" in state_dict: rng_state = state_dict["rng"] self.restore_rng_state(rng_state) def restore_rng_state(self, rng_state: Dict[str, Any]) -> None: random.setstate(rng_state["python"]) np.random.set_state(rng_state["numpy"]) torch.set_rng_state(rng_state["torch"]) torch.cuda.set_rng_state(rng_state["cuda"]) def save_sharded_checkpoint(self) -> Path: checkpoint_dir = Path(self.cfg.save_folder) / f"step{self.global_step}" checkpoint_dir_tmp = Path(self.cfg.save_folder) / f"step{self.global_step}-tmp" try: next(checkpoint_dir.glob("*")) if self.cfg.save_overwrite: if get_global_rank() == 0: shutil.rmtree(checkpoint_dir) else: raise OlmoConfigurationError( f"Checkpoint for step {self.global_step} already exists, use --save-overwrite to overwrite it" ) except StopIteration: pass if get_global_rank() == 0: checkpoint_dir_tmp.mkdir(parents=True, exist_ok=True) self.checkpoints.append(checkpoint_dir) barrier() # Flush data indices file. if self.indices_file is not None: self.indices_file.flush() # Write the checkpoint. with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=True), optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True), ): # NOTE: Alternatively we could use the checkpointing method in this test # https://github.com/pytorch/pytorch/blob/main/test/distributed/checkpoint/test_fsdp_optim_state.py # but we've had issues with that on AMD GPUs. See # https://github.com/pytorch/pytorch/issues/100041 # checkpoint.save_state_dict(self.state_dict(), checkpoint.FileSystemWriter(checkpoint_dir)) torch.save(self.state_dict(), checkpoint_dir_tmp / f"rank{get_global_rank()}.pt") # Save config too. if get_global_rank() == 0: self.cfg.save(checkpoint_dir_tmp / "config.yaml") barrier() if get_global_rank() == 0: # Replace temp directory with target checkpoint directory. checkpoint_dir_tmp.replace(checkpoint_dir) # Link to 'latest'. 
latest_path = Path(self.cfg.save_folder) / "latest" latest_path.unlink(missing_ok=True) latest_path.symlink_to(checkpoint_dir.name, target_is_directory=True) # In the cases where we're using a shared NFS drive between ranks to save checkpoints, # replacing the temp directory with the final directory from rank 0 might not be immediately # realized in the file systems of the other ranks. # So we wait here across all ranks until that final checkpoint directory is visible. wait_on(lambda: checkpoint_dir.exists(), "Waiting for checkpoint directory", timeout=10.0) # Remove old checkpoints. if self.cfg.save_num_checkpoints_to_keep > 0: while len(self.checkpoints) > self.cfg.save_num_checkpoints_to_keep: self.remove_sharded_checkpoint(0) barrier() # Upload checkpoint to bucket. if self.cfg.remote_save_folder is not None: files_to_upload = [f"rank{get_global_rank()}.pt"] if get_global_rank() == 0: files_to_upload.append("config.yaml") for fname in files_to_upload: source = checkpoint_dir / fname target = f"{self.cfg.remote_save_folder}/{checkpoint_dir.name}/{fname}" log.info(f"Uploading {source} to {target}...")
from __future__ import annotations __all__ = ["SpeedMonitor", "LRMonitor", "Trainer"] log = logging.getLogger(__name__) @dataclass class SpeedMonitor: cfg: SpeedMonitorConfig start_times: Deque[float] = field(default_factory=lambda: deque([])) global_total_tokens: int = 0 device_interval_tokens: Deque[int] = field(default_factory=lambda: deque([])) def batch_start(self, global_total_tokens: int, device_batch_num_tokens: int, record: bool = True) -> None: self.global_total_tokens = global_total_tokens if record: if len(self.start_times) >= self.cfg.window_size: self.start_times.popleft() self.device_interval_tokens.popleft() self.start_times.append(time.monotonic()) self.device_interval_tokens.append(device_batch_num_tokens) def reset(self) -> None: self.start_times.clear() self.device_interval_tokens.clear() def check(self) -> Dict[str, float]: metrics: Dict[str, float] = {"throughput/total_tokens": self.global_total_tokens} if self.start_times: interval_seconds = time.monotonic() - self.start_times[0] interval_batches = len(self.start_times) interval_tokens = sum(self.device_interval_tokens) metrics["throughput/device/tokens_per_second"] = interval_tokens / interval_seconds metrics["throughput/device/batches_per_second"] = interval_batches / interval_seconds return metrics @dataclass class LRMonitor: optim: torch.optim.Optimizer def check(self) -> Dict[str, float]: lrs = [group["lr"] for group in self.optim.param_groups] return {f"optim/learning_rate_group{idx}": lr for idx, lr in enumerate(lrs)} @dataclass class Trainer: cfg: TrainConfig model: Olmo fsdp_model: FSDP optim: torch.optim.Optimizer scheduler: torch.optim.lr_scheduler.LRScheduler train_loader: DataLoader device: torch.device evaluators: List[Evaluator] ce_train_loss_metric: MeanMetric z_train_loss_metric: Optional[MeanMetric] = None global_step: int = 0 global_data_step: int = 0 """This is now redundant since adding 'global_train_examples_seen'.""" global_train_examples_seen: int = 0 """Tracks the global number of training examples seen for the purpose of restoring the dataset position on restarts.""" global_train_tokens_seen: int = 0 """Tracks the global total number of tokens trained on.""" checkpoints: List[Path] = field(default_factory=list) unsharded_checkpoints: List[Path] = field(default_factory=list) min_train_loss: float = float("inf") indices_file: Optional[TextIO] = None def state_dict(self) -> Dict[str, Any]: state_dict = self.non_tensor_state_dict() state_dict["model"] = self.fsdp_model.state_dict() state_dict["optim"] = FSDP.optim_state_dict(self.fsdp_model, self.optim) return state_dict def non_tensor_state_dict(self) -> Dict[str, Any]: return { "scheduler": self.scheduler.state_dict(), "global_step": self.global_step, "global_data_step": self.global_data_step, "global_train_examples_seen": self.global_train_examples_seen, "global_train_tokens_seen": self.global_train_tokens_seen, "checkpoints": self.checkpoints, "unsharded_checkpoints": self.unsharded_checkpoints, "rng": { "python": random.getstate(), "numpy": np.random.get_state(), "torch": torch.random.get_rng_state(), "cuda": torch.cuda.get_rng_state(), }, } def load_non_tensor_state_dict(self, state_dict: Dict[str, Any]) -> None: # Checkpoint paths. 
self.checkpoints = [ path for path in state_dict["checkpoints"] if path.is_dir() and path.resolve().parent == Path(self.cfg.save_folder).resolve() ] self.unsharded_checkpoints = [ path for path in state_dict["unsharded_checkpoints"] if path.is_dir() and path.resolve().parent == Path(self.cfg.save_folder).resolve() ] # Learning rate scheduler. self.scheduler.load_state_dict(state_dict["scheduler"]) # Dataset / dataloader position. self.global_step = state_dict["global_step"] self.global_data_step = state_dict["global_data_step"] self.global_train_examples_seen = state_dict.get( # newer addition "global_train_examples_seen", self.global_data_step * self.cfg.global_train_batch_size ) self.global_train_tokens_seen = state_dict.get( # newer addition "global_train_tokens_seen", self.global_data_step * self.cfg.global_train_batch_size * self.cfg.model.max_sequence_length, ) if not self.cfg.restore_dataloader: self.global_data_step = 0 self.global_train_examples_seen = 0 self.global_train_tokens_seen = 0 elif self.cfg.fast_forward_batches: self.global_data_step += self.cfg.fast_forward_batches # Technically we don't "see" these batches that we fast-forward through, but we use # this variable to update the position of the dataset so we need to include them here. self.global_train_examples_seen += self.cfg.fast_forward_batches * self.cfg.global_train_batch_size # NOTE: on the other hand we don't add anything to 'self.global_train_tokens_seen' here because # that variable is meant to track the actual number of tokens trained on. if self.global_data_step > 0: if self.global_data_step > self.global_step: log.info( f"Fast-forwarding data loader to step {self.global_step:,d}+{self.global_data_step-self.global_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) else: log.info( f"Fast-forwarding data loader to step {self.global_data_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) assert isinstance(self.train_loader.dataset, IterableDataset) self.train_loader.dataset.start_index = self.global_train_examples_seen if not self.cfg.restore_base_learning_rate: # Reset base learning rate to the value in the config, not the checkpoint. set_new_base_lr(self.optim, self.scheduler, self.cfg.optimizer.learning_rate) # RNG states. if "rng" in state_dict: rng_state = state_dict["rng"] self.restore_rng_state(rng_state) def restore_rng_state(self, rng_state: Dict[str, Any]) -> None: random.setstate(rng_state["python"]) np.random.set_state(rng_state["numpy"]) torch.set_rng_state(rng_state["torch"]) torch.cuda.set_rng_state(rng_state["cuda"]) def save_sharded_checkpoint(self) -> Path: checkpoint_dir = Path(self.cfg.save_folder) / f"step{self.global_step}" checkpoint_dir_tmp = Path(self.cfg.save_folder) / f"step{self.global_step}-tmp" try: next(checkpoint_dir.glob("*")) if self.cfg.save_overwrite: if get_global_rank() == 0: shutil.rmtree(checkpoint_dir) else: raise OlmoConfigurationError( f"Checkpoint for step {self.global_step} already exists, use --save-overwrite to overwrite it" ) except StopIteration: pass if get_global_rank() == 0: checkpoint_dir_tmp.mkdir(parents=True, exist_ok=True) self.checkpoints.append(checkpoint_dir) barrier() # Flush data indices file. if self.indices_file is not None: self.indices_file.flush() # Write the checkpoint. 
with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=True), optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True), ): # NOTE: Alternatively we could use the checkpointing method in this test # https://github.com/pytorch/pytorch/blob/main/test/distributed/checkpoint/test_fsdp_optim_state.py # but we've had issues with that on AMD GPUs. See # https://github.com/pytorch/pytorch/issues/100041 # checkpoint.save_state_dict(self.state_dict(), checkpoint.FileSystemWriter(checkpoint_dir)) torch.save(self.state_dict(), checkpoint_dir_tmp / f"rank{get_global_rank()}.pt") # Save config too. if get_global_rank() == 0: self.cfg.save(checkpoint_dir_tmp / "config.yaml") barrier() if get_global_rank() == 0: # Replace temp directory with target checkpoint directory. checkpoint_dir_tmp.replace(checkpoint_dir) # Link to 'latest'. latest_path = Path(self.cfg.save_folder) / "latest" latest_path.unlink(missing_ok=True) latest_path.symlink_to(checkpoint_dir.name, target_is_directory=True) # In the cases where we're using a shared NFS drive between ranks to save checkpoints, # replacing the temp directory with the final directory from rank 0 might not be immediately # realized in the file systems of the other ranks. # So we wait here across all ranks until that final checkpoint directory is visible. wait_on(lambda: checkpoint_dir.exists(), "Waiting for checkpoint directory", timeout=10.0) # Remove old checkpoints. if self.cfg.save_num_checkpoints_to_keep > 0: while len(self.checkpoints) > self.cfg.save_num_checkpoints_to_keep: self.remove_sharded_checkpoint(0) barrier() # Upload checkpoint to bucket. if self.cfg.remote_save_folder is not None: files_to_upload = [f"rank{get_global_rank()}.pt"] if get_global_rank() == 0: files_to_upload.append("config.yaml") for fname in files_to_upload: source = checkpoint_dir / fname target = f"{self.cfg.remote_save_folder}/{checkpoint_dir.name}/{fname}" log.info(f"Uploading {source} to {target}...")
upload(source, target, save_overwrite=self.cfg.save_overwrite)
17
2023-11-14 02:24:07+00:00
16k
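As a usage note for the snippets gathered in this record's context (IterableDataset, TrainConfig): the trainer restores its dataloader position after a restart by setting start_index on the wrapped dataset, as seen in the cropped_code above. The sketch below illustrates that pattern in isolation; the toy token data, the batch sizes, and the assumption that the olmo package is importable are illustrative and not part of the record.

# Minimal sketch of the deterministic-restart pattern used above: wrap the training
# data in olmo.data.IterableDataset and fast-forward it by setting start_index to
# the number of examples already consumed (a multiple of the global batch size).
# The toy data and sizes here are assumptions for illustration only.
import torch
from torch.utils.data import DataLoader

from olmo.data import IterableDataset  # identifier listed in this record's context

global_train_batch_size = 512
global_train_examples_seen = 4 * global_train_batch_size  # e.g. restored from a checkpoint

# Toy token sequences standing in for the real memory-mapped training data.
token_data = [torch.randint(0, 50_000, (16,)) for _ in range(10_000)]

dataset = IterableDataset(
    token_data,
    seed=6198,                               # TrainConfig.seed default shown in this record
    shuffle=True,
    drop_last=True,
    world_size=1,
    rank=0,
    start_index=global_train_examples_seen,  # resume exactly where the previous run stopped
)
train_loader = DataLoader(dataset, batch_size=16)

for batch in train_loader:
    input_ids = batch["input_ids"]           # shape: (16, 16) in this toy setup
    break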
1in-oos/ccplus
caringcaribou/modules/uds.py
[ { "identifier": "auto_blacklist", "path": "caringcaribou/utils/can_actions.py", "snippet": "def auto_blacklist(bus, duration, classifier_function, print_results):\n \"\"\"Listens for false positives on the CAN bus and generates an arbitration ID blacklist.\n\n Finds all can.Message <msg> on 'bus' where 'classifier_function(msg)' evaluates to True.\n Terminates after 'duration' seconds and returns a set of all matching arbitration IDs.\n Prints progress, time countdown and list of results if 'print_results' is True.\n\n :param bus: CAN bus instance\n :param duration: duration in seconds\n :param classifier_function: function which, when called upon a can.Message instance,\n returns a bool indicating if it should be blacklisted\n :param print_results: whether progress and results should be printed to stdout\n :type bus: can.Bus\n :type duration: float\n :type classifier_function: function\n :type print_results: bool\n :return set of matching arbitration IDs to blacklist\n :rtype set(int)\n \"\"\"\n if print_results:\n print(\"Scanning for arbitration IDs to blacklist\")\n blacklist = set()\n start_time = time.time()\n end_time = start_time + duration\n while time.time() < end_time:\n if print_results:\n time_left = end_time - time.time()\n num_matches = len(blacklist)\n print(\"\\r{0:> 5.1f} seconds left, {1} found\".format(time_left, num_matches), end=\"\")\n stdout.flush()\n # Receive message\n msg = bus.recv(0.1)\n if msg is None:\n continue\n # Classify\n if classifier_function(msg):\n # Add to blacklist\n blacklist.add(msg.arbitration_id)\n if print_results:\n num_matches = len(blacklist)\n print(\"\\r 0.0 seconds left, {0} found\".format(num_matches), end=\"\")\n if len(blacklist) > 0:\n print(\"\\n Detected IDs: {0}\".format(\" \".join(sorted(list(map(hex, blacklist))))))\n else:\n print()\n return blacklist" }, { "identifier": "list_to_hex_str", "path": "caringcaribou/utils/common.py", "snippet": "def list_to_hex_str(data, delimiter=\"\"):\n \"\"\"Returns a hex string representation of the int values\n in 'data', separated with 'delimiter' between each byte\n\n Example:\n list_to_hex_str([10, 100, 200]) -> 0a.64.c8\n list_to_hex_str([0x07, 0xff, 0x6c], \"\") -> 07ff6c\n :param data: iterable of values\n :param delimiter: separator between values in output\n :type data: [int]\n :type delimiter: str\n :return: hex string representation of data\n :rtype str\n \"\"\"\n data_string = delimiter.join([\"{0:02x}\".format(i) for i in data])\n return data_string" }, { "identifier": "parse_int_dec_or_hex", "path": "caringcaribou/utils/common.py", "snippet": "def parse_int_dec_or_hex(value):\n \"\"\"Parses an integer on base 10 (decimal) or 16 (hex with \"0x\" prefix)\n\n Examples:\n parse_int_dec_or_hex(\"1234\") -> 1234\n parse_int_dec_or_hex(\"0xa7\") -> 167\n\n :param value: the value to parse\n :type value: str\n :rtype int\n \"\"\"\n return int(value, 0)" }, { "identifier": "ARBITRATION_ID_MAX", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MAX = 0x7FF" }, { "identifier": "ARBITRATION_ID_MAX_EXTENDED", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MAX_EXTENDED = 0x18DAFFF1" }, { "identifier": "ARBITRATION_ID_MIN_EXTENDED", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MIN_EXTENDED = 0x18DA00F1" }, { "identifier": "ARBITRATION_ID_MIN", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MIN = 0x700" }, { "identifier": "IsoTp", "path": "caringcaribou/utils/iso15765_2.py", "snippet": "class 
IsoTp:\n \"\"\"\n Implementation of ISO-15765-2, also known as ISO-TP. This is a multi-frame messaging protocol\n over CAN, which allows message payloads of up to 4095 bytes.\n \"\"\"\n\n MAX_SF_LENGTH = 7\n MAX_FF_LENGTH = 6\n MAX_CF_LENGTH = 7\n\n SF_PCI_LENGTH = 1\n CF_PCI_LENGTH = 1\n FF_PCI_LENGTH = 2\n FC_PCI_LENGTH = 3\n\n FC_FS_CTS = 0\n FC_FS_WAIT = 1\n FC_FS_OVFLW = 2\n\n SF_FRAME_ID = 0\n FF_FRAME_ID = 1\n CF_FRAME_ID = 2\n FC_FRAME_ID = 3\n\n N_BS_TIMEOUT = 1.5\n\n MAX_FRAME_LENGTH = 8\n MAX_MESSAGE_LENGTH = 4095\n\n def __init__(self, arb_id_request, arb_id_response, bus=None, padding_value=0x00):\n # Setting default bus to None rather than the actual bus prevents a CanError when\n # called with a virtual CAN bus, while the OS is lacking a working CAN interface\n if bus is None:\n self.bus = can.Bus(DEFAULT_INTERFACE)\n else:\n self.bus = bus\n self.arb_id_request = arb_id_request\n self.arb_id_response = arb_id_response\n # Controls optional padding of SF messages and the last CF frame in multi-frame messages\n # Disabled padding is _not_ part of ISO-15765-2, but might prove useful for testing against some targets\n self.padding_value = padding_value\n if padding_value is None:\n self.padding_enabled = False\n else:\n self.padding_enabled = True\n if not isinstance(padding_value, int):\n raise TypeError(\"IsoTp: padding must be an integer or None, received '{0}'\".format(padding_value))\n if not 0x00 <= padding_value <= 0xFF:\n raise ValueError(\"IsoTp: padding must be in range 0x00-0xFF (0-255), got '{0}'\".format(padding_value))\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.bus.shutdown()\n\n def _set_filters(self, filters):\n \"\"\"\n Sets filters for the CAN bus - description can be found at\n https://python-can.readthedocs.io/en/stable/bus.html#can.BusABC.set_filters\n\n :param filters: dict specifying \"can_id\", \"can_mask\" and (optional) \"extended\" flag\n :return: None\n \"\"\"\n self.bus.set_filters(filters)\n\n def set_filter_single_arbitration_id(self, arbitration_id):\n \"\"\"Set a filter to only receive incoming messages on 'arbitration_id'\"\"\"\n arbitration_id_filter = [{\"can_id\": arbitration_id, \"can_mask\": ARBITRATION_ID_MAX_EXTENDED}]\n self._set_filters(arbitration_id_filter)\n\n def clear_filters(self):\n \"\"\"Remove arbitration ID filters\"\"\"\n self._set_filters(None)\n\n def send_message(self, data, arbitration_id, force_extended=False):\n \"\"\"\n Transmits a message using 'arbitration_id' and 'data' on 'self.bus'\n\n :param data: Data to send\n :param arbitration_id: Arbitration ID to use\n :param force_extended: Force extended arbitration ID\n :return: None\n \"\"\"\n is_extended = force_extended or arbitration_id > ARBITRATION_ID_MAX\n msg = can.Message(arbitration_id=arbitration_id, data=data, is_extended_id=is_extended)\n self.bus.send(msg)\n\n def decode_sf(self, frame):\n \"\"\"\n Decodes a singe frame (SF) message\n\n :param frame: Frame to decode\n :return: Tuple of single frame data length (SF_DL) and data if valid,\n Tuple of None, None otherwise\n \"\"\"\n if len(frame) >= self.SF_PCI_LENGTH:\n sf_dl = frame[0] & 0xF\n data = frame[1:]\n return sf_dl, list(data)\n else:\n return None, None\n\n def decode_ff(self, frame):\n \"\"\"\n Decodes a first frame (FF) message\n\n :param frame: Frame to decode\n :return: Tuple of first frame data length (FF_DL) and data if valid,\n Tuple of None, None otherwise\n \"\"\"\n if len(frame) >= self.FF_PCI_LENGTH:\n ff_dl = ((frame[0] & 0xF) << 8) 
| frame[1]\n data = frame[2:]\n return ff_dl, list(data)\n else:\n return None, None\n\n def decode_cf(self, frame):\n \"\"\"\n Decodes a consecutive frame (CF) message\n\n :param frame: Frame to decode\n :return: Tuple of sequence number (SN) and data if valid,\n Tuple of None, None otherwise\n \"\"\"\n if len(frame) >= self.CF_PCI_LENGTH:\n sn = frame[0] & 0xF\n data = frame[1:]\n return sn, list(data)\n else:\n return None, None\n\n def decode_fc(self, frame):\n \"\"\"\n Decodes a flow control (FC) frame\n\n :param frame: Frame to decode\n :return: Tuple of values flow status (FS), block size (BS) and separation time minimum (STmin) if valid,\n Tuple of None, None, None otherwise\n \"\"\"\n if len(frame) >= self.FC_PCI_LENGTH:\n fs = frame[0] & 0xF\n block_size = frame[1]\n st_min = frame[2]\n return fs, block_size, st_min\n else:\n return None, None, None\n\n def encode_fc(self, flow_status, block_size, st_min):\n \"\"\"\n Encodes a flow control (FC) message\n\n :param flow_status: Flow status (FS)\n :param block_size: Block size (BS)\n :param st_min: Separation time minimum (STmin)\n :return: Encoded data for the flow control message\n \"\"\"\n return [(self.FC_FRAME_ID << 4) | flow_status, block_size, st_min, 0, 0, 0, 0, 0]\n\n def send_request(self, message):\n \"\"\"\n Wrapper for sending 'message' as a request\n\n :param message: The message to send\n :return: None\n \"\"\"\n frames = self.get_frames_from_message(message, padding_value=self.padding_value)\n self.transmit(frames, self.arb_id_request, self.arb_id_response)\n\n def send_response(self, message):\n \"\"\"\n Wrapper for sending 'message' as a response\n\n :param message: The message to send\n :return: None\n \"\"\"\n frames = self.get_frames_from_message(message, padding_value=self.padding_value)\n self.transmit(frames, self.arb_id_response, self.arb_id_request)\n\n def indication(self, wait_window=None, trim_padding=True, first_frame_only=False):\n \"\"\"\n Receives an ISO-15765-2 message (one or more frames) and returns its content.\n\n :param wait_window: Max time (in seconds) to wait before timeout\n :param trim_padding: If True, removes message padding bytes from the received message\n :param first_frame_only: If True, return first frame only (simulating overflow behavior for multi-frame message)\n :return: A list of received data bytes if successful, None otherwise\n \"\"\"\n message = []\n\n if wait_window is None:\n wait_window = self.N_BS_TIMEOUT\n start_time = datetime.datetime.now()\n end_time = start_time + datetime.timedelta(seconds=wait_window)\n sn = 0\n message_length = 0\n\n while True:\n # Timeout check\n current_time = datetime.datetime.now()\n if current_time >= end_time:\n # Timeout\n return None\n # Receive frame\n msg = self.bus.recv(wait_window)\n if msg is not None:\n if msg.arbitration_id == self.arb_id_request:\n flow_control_arbitration_id = self.arb_id_response\n elif msg.arbitration_id == self.arb_id_response:\n flow_control_arbitration_id = self.arb_id_request\n else:\n # Unknown arbitration ID - ignore message\n continue\n frame = msg.data\n if len(frame) > 0:\n frame_type = (frame[0] >> 4) & 0xF\n if frame_type == self.SF_FRAME_ID:\n # Single frame (SF)\n dl, message = self.decode_sf(frame)\n if trim_padding:\n # Trim padding, in case the data exceeds single frame data length (SF_DL)\n message = message[:dl]\n break\n elif frame_type == self.FF_FRAME_ID:\n # First frame (FF) of a multi-frame message\n message_length, message = self.decode_ff(frame)\n if first_frame_only:\n # This is a 
hack to make it possible to only retrieve the first frame of a multi-frame\n # response, by telling the sender to stop sending data due to overflow\n ovflw_frame = self.encode_fc(self.FC_FS_OVFLW, 0, 0)\n # Respond with overflow (OVFLW) message\n self.send_message(ovflw_frame, flow_control_arbitration_id)\n # Return the first frame only\n break\n fc_frame = self.encode_fc(self.FC_FS_CTS, 0, 0)\n sn = 0\n # Respond with flow control (FC) message\n self.send_message(fc_frame, flow_control_arbitration_id)\n elif frame_type == self.CF_FRAME_ID:\n # Consecutive frame (CF)\n new_sn, data = self.decode_cf(frame)\n if (sn + 1) % 16 == new_sn:\n sn = new_sn\n message += data\n if len(message) >= message_length:\n # Last frame received\n if trim_padding:\n # Trim padding of last frame, which may exceed first frame data length (FF_DL)\n message = message[:message_length]\n # Stop listening for more frames\n break\n else:\n pass\n else:\n # Invalid frame type\n return None\n return list(message)\n\n def transmit(self, frames, arbitration_id, arbitration_id_flow_control):\n \"\"\"\n Transmits 'frames' in order on the bus, according to ISO-15765-2\n\n :param frames: List of frames (which are in turn lists of values) to send\n :param arbitration_id: The arbitration ID used for sending\n :param arbitration_id_flow_control: The arbitration ID used for receiving flow control (FC)\n :return: None\n \"\"\"\n if len(frames) == 0:\n # No data to send\n return None\n elif len(frames) == 1:\n # Single frame\n self.send_message(frames[0], arbitration_id)\n elif len(frames) > 1:\n # Multiple frames\n frame_index = 0\n # Send first frame (FF)\n self.send_message(frames[frame_index], arbitration_id)\n number_of_frames_left_to_send = len(frames) - 1\n number_of_frames_left_to_send_in_block = 0\n frame_index += 1\n st_min = 0\n while number_of_frames_left_to_send > 0:\n receiver_is_ready = False\n while not receiver_is_ready:\n # Wait for receiver to send flow control (FC)\n msg = self.bus.recv(self.N_BS_TIMEOUT)\n if msg is None:\n # Quit on timeout\n return None\n # Verify that msg uses the expected arbitration ID\n elif msg.arbitration_id != arbitration_id_flow_control:\n continue\n fc_frame = msg.data\n\n # Decode Flow Status (FS) from FC message\n fs, block_size, st_min = self.decode_fc(fc_frame)\n if fs == self.FC_FS_WAIT:\n # Flow status (FS) wait (WT)\n continue\n elif fs == self.FC_FS_CTS:\n # Continue to send (CTS)\n receiver_is_ready = True\n number_of_frames_left_to_send_in_block = block_size\n\n if number_of_frames_left_to_send < number_of_frames_left_to_send_in_block or block_size == 0:\n number_of_frames_left_to_send_in_block = number_of_frames_left_to_send\n # If STmin is specified in microseconds (0xF1-0xF9) or using reserved ranges (0x80-0xF0 and\n # 0xFA-0xFF), round up to one millisecond\n if st_min > 0x7F:\n st_min = 1\n elif fs == self.FC_FS_OVFLW:\n # Overflow - abort transmission\n return None\n else:\n # Timeout - did not receive a CTS message in time\n return None\n while number_of_frames_left_to_send_in_block > 0:\n # Send more frames, until it is time to wait for flow control (FC) again\n self.send_message(frames[frame_index], arbitration_id)\n frame_index += 1\n number_of_frames_left_to_send_in_block -= 1\n number_of_frames_left_to_send -= 1\n if number_of_frames_left_to_send_in_block > 0:\n time.sleep(st_min / 1000)\n\n @staticmethod\n def get_frames_from_message(message, padding_value=0x00):\n \"\"\"\n Returns a copy of 'message' split into frames,\n :param message: Message to split\n 
:param padding_value: Integer value used to pad messages, or None to disable padding (not part of ISO-15765-3)\n :return: List of frames\n \"\"\"\n if padding_value is None:\n padding_enabled = False\n padding_value = 0x00\n else:\n padding_enabled = True\n\n frame_list = []\n message_length = len(message)\n if message_length > IsoTp.MAX_MESSAGE_LENGTH:\n error_msg = \"Message too long for ISO-TP. Max allowed length is {0} bytes, received {1} bytes\".format(\n IsoTp.MAX_MESSAGE_LENGTH, message_length)\n raise ValueError(error_msg)\n if message_length <= IsoTp.MAX_SF_LENGTH:\n # Single frame (SF) message\n if padding_enabled:\n frame = [padding_value] * IsoTp.MAX_FRAME_LENGTH\n else:\n frame = [padding_value] * (message_length + 1)\n frame[0] = (IsoTp.SF_FRAME_ID << 4) | message_length\n for i in range(0, message_length):\n frame[1 + i] = message[i]\n frame_list.append(frame)\n else:\n # Multiple frame message\n bytes_left_to_copy = message_length\n # Create first frame (FF)\n frame = [padding_value] * IsoTp.MAX_FRAME_LENGTH\n frame[0] = (IsoTp.FF_FRAME_ID << 4) | (message_length >> 8)\n frame[1] = message_length & 0xFF\n for i in range(0, IsoTp.MAX_FF_LENGTH):\n frame[2 + i] = message[i]\n frame_list.append(frame)\n # Create consecutive frames (CF)\n bytes_copied = IsoTp.MAX_FF_LENGTH\n bytes_left_to_copy -= bytes_copied\n sn = 0\n while bytes_left_to_copy > 0:\n sn = (sn + 1) % 16\n if not padding_enabled and bytes_left_to_copy < 7:\n # Skip padding on last CF\n frame = [padding_value] * (bytes_left_to_copy + 1)\n else:\n frame = [padding_value] * IsoTp.MAX_FRAME_LENGTH\n frame[0] = (IsoTp.CF_FRAME_ID << 4) | sn\n # Fill current CF\n bytes_to_copy_to_current_cf = min(IsoTp.MAX_CF_LENGTH, bytes_left_to_copy)\n for i in range(bytes_to_copy_to_current_cf):\n frame[1 + i] = message[bytes_copied]\n bytes_left_to_copy = bytes_left_to_copy - 1\n bytes_copied = bytes_copied + 1\n frame_list.append(frame)\n return frame_list" }, { "identifier": "Constants", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n # the SI values. 
The NR_SI value is not used as a SI value in order to\n # make A_Data coding and decoding easier.\"\n NR_SI = 0x7F" }, { "identifier": "Iso14229_1", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Iso14229_1(object):\n P3_CLIENT = 5\n\n def __init__(self, tp):\n self.tp = tp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n @staticmethod\n def get_service_response_id(request_id):\n \"\"\"\n Returns the service response ID for the given request ID\n\n :param request_id: Request service ID\n :return: Corresponding response service ID\n \"\"\"\n return request_id + 0x40\n\n @staticmethod\n def get_service_request_id(response_id):\n \"\"\"\n Returns the service request ID for the given response ID\n\n :param response_id: Response service ID\n :return: Corresponding request service ID\n \"\"\"\n return response_id - 0x40\n\n def send_request(self, data):\n \"\"\"\n Sends a request message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_request(data)\n\n def send_response(self, data):\n \"\"\"\n Sends a response message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_response(data)\n\n def receive_response(self, wait_window):\n \"\"\"\n Attempts to receive a response through the underlying TP layer\n\n :param wait_window: Minimum time (in seconds) to wait before timeout\n :return: The received response if successful,\n None otherwise\n \"\"\"\n start_time = time.process_time()\n while True:\n current_time = time.process_time()\n if (current_time - start_time) > wait_window:\n return None\n\n response = self.tp.indication(wait_window)\n NRC = NegativeResponseCodes\n NRC_RCRRP = NRC.REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\n if response is not None and len(response) >= 3:\n if (response[0] == Constants.NR_SI and\n response[2] == NRC_RCRRP):\n continue\n break\n return response\n\n @staticmethod\n def is_positive_response(response):\n \"\"\"\n Returns a bool indicating whether 'response' is positive\n\n :param response: ISO-14229-1 response data\n :return: False if response is a NEGATIVE_RESPONSE,\n True otherwise\n \"\"\"\n if (response is not None and\n len(response) > 0 and\n response[0] != Constants.NR_SI):\n return True\n return False\n\n def read_data_by_identifier(self, identifier):\n \"\"\"\n Sends a \"read data by identifier\" request for 'identifier'\n\n :param identifier: Data identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n response = []\n num_dids = len(identifier)\n if num_dids > 0:\n request = [0] * ((num_dids * 2) + 1)\n request[0] = ServiceID.READ_DATA_BY_IDENTIFIER\n for i in range(0, num_dids):\n request[i * 2 + 1] = (identifier[i] >> 8) & 0xFF\n request[i * 2 + 2] = identifier[i] & 0xFF\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n return response\n\n def read_memory_by_address(self, address_and_length_format,\n memory_address, memory_size):\n \"\"\"\n Sends a \"read memory by address\" request for 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n 
request[0] = ServiceID.READ_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_memory_by_address(self, address_and_length_format,\n memory_address, memory_size, data):\n \"\"\"\n Sends a \"write memory by address\" request to write 'data' to\n 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :param data: The data to write to 'memory_address'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.WRITE_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_data_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"write data by identifier\" request to write 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data to write to 'identifier'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.WRITE_DATA_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def input_output_control_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"input output control by identifier\" request for 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def dynamically_define_data_identifier(self, identifier,\n sub_function, sub_function_arg):\n \"\"\"\n Sends a \"dynamically define data identifier\" request for\n 'identifier'\n\n :param identifier: DDDID to set\n :param sub_function: Sub function\n :param sub_function_arg: Sub function arguments\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (identifier is None or\n sub_function is None or\n sub_function_arg is None):\n return None\n\n request = [0] * (1 + 1 + 2 + len(sub_function_arg) * 4)\n request[0] = ServiceID.DYNAMICALLY_DEFINE_DATA_IDENTIFIER\n request[1] = sub_function\n request[2] = (identifier >> 8) & 0xFF\n request[3] = identifier & 0xFF\n\n offset = 4\n for did in sub_function_arg:\n request[offset + 0] = 
(did.sourceDataIdentifier >> 8) & 0xFF\n request[offset + 1] = did.sourceDataIdentifier & 0xFF\n request[offset + 2] = did.positionInSourceDataRecord\n request[offset + 3] = did.memorySize\n offset += 4\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def diagnostic_session_control(self, session_type):\n \"\"\"\n Sends a \"DiagnosticSessionControl\" request for specified session\n type\n\n :param session_type: Indicates which kind of session should be\n requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n request[1] = session_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def ecu_reset(self, reset_type):\n \"\"\"\n Sends an \"ECU reset\" request for specified reset type\n\n :param reset_type: Indicates which kind of reset should be requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.ECU_RESET\n request[1] = reset_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_request_seed(self, level, data_record=None):\n \"\"\"\n Sends a Security Access \"Request seed\" message for 'level'\n\n :param level: Security Access Type level to send request seed for\n :param data_record: Optional data to transmit when requesting seed,\n e.g. client identification\n :return: Response data (containing seed) if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n if data_record:\n for data_record in data_record:\n request.append(data_record)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_send_key(self, level, key):\n \"\"\"\n Sends a Security Access \"Send key\" message with 'key' for 'level'\n\n :param level: Security Access Type level to send key for\n :param key: Key to transmit\n :return: Response data if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n for key_byte in key:\n request.append(key_byte)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def read_data_by_periodic_identifier(self, transmission_mode,\n identifier):\n \"\"\"\n Sends a \"read data by periodic identifier\" request for 'identifier'\n\n :param transmission_mode: Transmission mode\n :param identifier: Identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (transmission_mode is None or\n identifier is None or\n len(identifier) == 0):\n return None\n\n request = [0] * (2 + len(identifier))\n request[0] = ServiceID.READ_DATA_BY_PERIODIC_IDENTIFIER\n request[1] = transmission_mode\n\n for i in range(0, len(identifier)):\n request[2 + i] = identifier[i]\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response" }, { "identifier": "NegativeResponseCodes", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class NegativeResponseCodes(object):\n \"\"\"\n ISO-14229-1 negative response codes\n \"\"\"\n POSITIVE_RESPONSE = 0x00\n # 0x01-0x0F ISO SAE Reserved\n GENERAL_REJECT = 0x10\n SERVICE_NOT_SUPPORTED = 0x11\n SUB_FUNCTION_NOT_SUPPORTED = 0x12\n INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT = 0x13\n RESPONSE_TOO_LONG = 0x14\n # 0x15-0x20 
ISO SAE Reserved\n BUSY_REPEAT_REQUEST = 0x21\n CONDITIONS_NOT_CORRECT = 0x22\n # 0x23 ISO SAE Reserved\n REQUEST_SEQUENCE_ERROR = 0x24\n NO_RESPONSE_FROM_SUBNET_COMPONENT = 0x25\n FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION = 0x26\n # 0x27-0x30 ISO SAE Reserved\n REQUEST_OUT_OF_RANGE = 0x31\n # 0x32 ISO SAE Reserved\n SECURITY_ACCESS_DENIED = 0x33\n # 0x34 ISO SAE Reserved\n INVALID_KEY = 0x35\n EXCEEDED_NUMBER_OF_ATTEMPTS = 0x36\n REQUIRED_TIME_DELAY_NOT_EXPIRED = 0x37\n # 0x38-0x4F Reserved by extended data link security document\n # 0x50-0x6F ISO SAE Reserved\n UPLOAD_DOWNLOAD_NOT_ACCEPTED = 0x70\n TRANSFER_DATA_SUSPENDED = 0x71\n GENERAL_PROGRAMMING_FAILURE = 0x72\n WRONG_BLOCK_SEQUENCE_COUNTER = 0x73\n # 0x74-0x77 ISO SAE Reserved\n REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING = 0x78\n # 0x79-0x7D ISO SAE Reserved\n SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7E\n SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7F\n # 0x80 ISO SAE Reserved\n RPM_TOO_HIGH = 0x81\n RPM_TOO_LOW = 0x82\n ENGINE_IS_RUNNING = 0x83\n ENGINE_IS_NOT_RUNNING = 0x84\n ENGINE_RUN_TIME_TOO_LOW = 0x85\n TEMPERATURE_TOO_HIGH = 0x86\n TEMPERATURE_TOO_LOW = 0x87\n VEHICLE_SPEED_TOO_HIGH = 0x88\n VEHICLE_SPEED_TOO_LOW = 0x89\n THROTTLE_PEDAL_TOO_HIGH = 0x8A\n THROTTLE_PEDAL_TOO_LOW = 0x8B\n TRANSMISSION_RANGE_NOT_IN_NEUTRAL = 0x8C\n TRANSMISSION_RANGE_NOT_IN_GEAR = 0x8D\n # 0x8E ISO SAE Reserved\n BRAKE_SWITCHES_NOT_CLOSED = 0x8F\n SHIFT_LEVER_NOT_IN_PARK = 0x90\n TORQUE_CONVERTER_CLUTCH_LOCKED = 0x91\n VOLTAGE_TOO_HIGH = 0x92\n VOLTAGE_TOO_LOW = 0x93\n # 0x94-0xEF Reserved for specific conditions not correct\n # 0xF0-0xFE Vehicle manufacturer specific conditions not correct\n # 0xFF ISO SAE Reserved" }, { "identifier": "Services", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Services(object):\n \"\"\"Class structure containing service specific constants, sub-function\n parameters and functions\"\"\"\n\n class DiagnosticSessionControl(BaseService):\n\n service_id = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n\n class DiagnosticSessionType(object):\n # 0x00 ISO SAE Reserved\n DEFAULT_SESSION = 0x01\n PROGRAMMING_SESSION = 0x02\n EXTENDED_DIAGNOSTIC_SESSION = 0x03\n SAFETY_SYSTEM_DIAGNOSTIC_SESSION = 0x04\n # 0x05-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n VEHICLE_MANUFACTURER_SESSION_MIN = 0x40\n VEHICLE_MANUFACTURER_SESSION_MAX = 0x5F\n # 0x60-0x7E System supplier specific\n SYSTEM_SUPPLIER_SESSION_MIN = 0x60\n SYSTEM_SUPPLIER_SESSION_MAX = 0x7E\n # 0x7F ISO SAE Reserved\n\n class EcuReset(BaseService):\n\n service_id = ServiceID.ECU_RESET\n\n class ResetType(object):\n # 0x00 ISO SAE Reserved\n HARD_RESET = 0x01\n KEY_OFF_ON_RESET = 0x02\n SOFT_RESET = 0x03\n ENABLE_RAPID_POWER_SHUTDOWN = 0x04\n DISABLE_RAPID_POWER_SHUTDOWN = 0x05\n # 0x06-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n # 0x60-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n class SecurityAccess(BaseService):\n\n service_id = ServiceID.SECURITY_ACCESS\n\n class RequestSeedOrSendKey(object):\n \"\"\"\n These are lined up so that value X \"request seed level N\" has\n a matching \"send key level N\" at value X+1.\n\n 0x01 is Request seed level 0x01\n 0x02 is Send key level 0x01\n 0x03 is Request seed level 0x02\n 0x04 is Send key level 0x02\n (...)\n 0x41 is Request seed level 0x21\n 0x42 is Send key level 0x21\n\n The security levels numbering is arbitrary and does not imply\n any relationship between the levels.\n \"\"\"\n\n # 0x00 ISO SAE Reserved\n # 0x01-0x42 Vehicle 
manufacturer specific request\n # seed/send key pairs\n # 0x43-0X5E ISO SAE Reserved\n ISO_26021_2_VALUES = 0x5F\n ISO_26021_2_SEND_KEY = 0x60\n # 0x61-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n __REQUEST_SEED_MIN = 0x01\n __REQUEST_SEED_MAX = 0x41\n __SEND_KEY_MIN = 0x02\n __SEND_KEY_MAX = 0x42\n\n def is_valid_request_seed_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid request seed\n value and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__REQUEST_SEED_MIN\n <= value <= self.__REQUEST_SEED_MAX)\n is_odd = value % 2 == 1\n return valid_interval and is_odd\n\n def is_valid_send_key_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid send key value\n and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__SEND_KEY_MIN\n <= value <= self.__SEND_KEY_MAX)\n is_even = value % 2 == 0\n return valid_interval and is_even\n\n @staticmethod\n def get_send_key_for_request_seed(seed):\n return seed + 1\n\n class TesterPresent(BaseService):\n\n service_id = ServiceID.TESTER_PRESENT" }, { "identifier": "ServiceID", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class ServiceID(object):\n \"\"\"\n ISO-14229-1 service ID definitions\n \"\"\"\n DIAGNOSTIC_SESSION_CONTROL = 0x10\n ECU_RESET = 0x11\n CLEAR_DIAGNOSTIC_INFORMATION = 0x14\n READ_DTC_INFORMATION = 0x19\n READ_DATA_BY_IDENTIFIER = 0x22\n READ_MEMORY_BY_ADDRESS = 0x23\n READ_SCALING_DATA_BY_IDENTIFIER = 0x24\n SECURITY_ACCESS = 0x27\n COMMUNICATION_CONTROL = 0x28\n READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2A\n DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2C\n WRITE_DATA_BY_IDENTIFIER = 0x2E\n INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2F\n ROUTINE_CONTROL = 0x31\n REQUEST_DOWNLOAD = 0x34\n REQUEST_UPLOAD = 0x35\n TRANSFER_DATA = 0x36\n REQUEST_TRANSFER_EXIT = 0x37\n REQUEST_FILE_TRANSFER = 0x38\n WRITE_MEMORY_BY_ADDRESS = 0x3D\n TESTER_PRESENT = 0x3E\n ACCESS_TIMING_PARAMETER = 0x83\n SECURED_DATA_TRANSMISSION = 0x84\n CONTROL_DTC_SETTING = 0x85\n RESPONSE_ON_EVENT = 0x86\n LINK_CONTROL = 0x87" } ]
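The IsoTp snippet in the context above splits a payload into a single frame (SF) when it fits in 7 bytes, or into a first frame (FF) followed by consecutive frames (CF) carrying a 4-bit rolling sequence number. The standalone sketch below reproduces that framing idea for illustration only; it assumes classic 8-byte CAN frames and 0x00 padding and is not the caringcaribou implementation itself.

def split_into_isotp_frames(payload, padding=0x00):
    # Illustrative framing only: SF for payloads of up to 7 bytes,
    # otherwise FF followed by CFs with a 4-bit rolling sequence number.
    if len(payload) > 4095:
        raise ValueError("ISO-TP payload may not exceed 4095 bytes")
    if len(payload) <= 7:
        frame = [0x00 | len(payload)] + list(payload)      # SF: PCI nibble 0x0, low nibble = length
        return [frame + [padding] * (8 - len(frame))]
    frames = [[0x10 | (len(payload) >> 8), len(payload) & 0xFF] + list(payload[:6])]  # FF
    rest, sn = list(payload[6:]), 0
    while rest:
        sn = (sn + 1) % 16
        frame = [0x20 | sn] + rest[:7]                     # CF
        rest = rest[7:]
        frames.append(frame + [padding] * (8 - len(frame)))
    return frames

# A 10-byte payload becomes one first frame and one padded consecutive frame.
for f in split_into_isotp_frames(list(range(10))):
    print(" ".join("{0:02x}".format(b) for b in f))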
from caringcaribou.utils.can_actions import auto_blacklist from caringcaribou.utils.common import list_to_hex_str, parse_int_dec_or_hex from caringcaribou.utils.constants import ARBITRATION_ID_MAX, ARBITRATION_ID_MAX_EXTENDED,ARBITRATION_ID_MIN_EXTENDED from caringcaribou.utils.constants import ARBITRATION_ID_MIN from caringcaribou.utils.iso15765_2 import IsoTp from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, Services, ServiceID from sys import stdout, version_info, stderr import argparse import datetime import time
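For context, the two parsing helpers imported above behave exactly as documented in their snippets; the lines below are a small, self-contained illustration, reimplemented locally (with the _demo suffix) so they run without the caringcaribou package installed.

def parse_int_dec_or_hex_demo(value):
    # Base 0 lets int() accept both decimal strings and "0x"-prefixed hex strings.
    return int(value, 0)

def list_to_hex_str_demo(data, delimiter=""):
    # Two lowercase hex digits per byte, joined by the given delimiter.
    return delimiter.join("{0:02x}".format(b) for b in data)

print(parse_int_dec_or_hex_demo("0x7DF"))              # 2015
print(parse_int_dec_or_hex_demo("1234"))               # 1234
print(list_to_hex_str_demo([0x02, 0x10, 0x01], "."))   # 02.10.01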
12245
0x12: "SUB_FUNCTION_NOT_SUPPORTED", 0x13: "INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT", 0x14: "RESPONSE_TOO_LONG", 0x21: "BUSY_REPEAT_REQUEST", 0x22: "CONDITIONS_NOT_CORRECT", 0x24: "REQUEST_SEQUENCE_ERROR", 0x25: "NO_RESPONSE_FROM_SUBNET_COMPONENT", 0x26: "FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION", 0x31: "REQUEST_OUT_OF_RANGE", 0x33: "SECURITY_ACCESS_DENIED", 0x35: "INVALID_KEY", 0x36: "EXCEEDED_NUMBER_OF_ATTEMPTS", 0x37: "REQUIRED_TIME_DELAY_NOT_EXPIRED", 0x70: "UPLOAD_DOWNLOAD_NOT_ACCEPTED", 0x71: "TRANSFER_DATA_SUSPENDED", 0x72: "GENERAL_PROGRAMMING_FAILURE", 0x73: "WRONG_BLOCK_SEQUENCE_COUNTER", 0x78: "REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING", 0x7E: "SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION", 0x7F: "SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION", 0x81: "RPM_TOO_HIGH", 0x82: "RPM_TOO_LOW", 0x83: "ENGINE_IS_RUNNING", 0x84: "ENGINE_IS_NOT_RUNNING", 0x85: "ENGINE_RUN_TIME_TOO_LOW", 0x86: "TEMPERATURE_TOO_HIGH", 0x87: "TEMPERATURE_TOO_LOW", 0x88: "VEHICLE_SPEED_TOO_HIGH", 0x89: "VEHICLE_SPEED_TOO_LOW", 0x8A: "THROTTLE_PEDAL_TOO_HIGH", 0x8B: "THROTTLE_PEDAL_TOO_LOW", 0x8C: "TRANSMISSION_RANGE_NOT_IN_NEUTRAL", 0x8D: "TRANSMISSION_RANGE_NOT_IN_GEAR", 0x8F: "BRAKE_SWITCHES_NOT_CLOSED", 0x90: "SHIFT_LEVER_NOT_IN_PARK", 0x91: "TORQUE_CONVERTER_CLUTCH_LOCKED", 0x92: "VOLTAGE_TOO_HIGH", 0x93: "VOLTAGE_TOO_LOW" } DELAY_DISCOVERY = 0.01 DELAY_TESTER_PRESENT = 0.5 DELAY_SECSEED_RESET = 0.01 TIMEOUT_SERVICES = 0.2 TIMEOUT_SUBSERVICES = 0.02 # Max number of arbitration IDs to backtrack during verification VERIFICATION_BACKTRACK = 5 # Extra time in seconds to wait for responses during verification VERIFICATION_EXTRA_DELAY = 0.5 BYTE_MIN = 0x00 BYTE_MAX = 0xFF DUMP_DID_MIN = 0x0000 DUMP_DID_MAX = 0xFFFF DUMP_DID_TIMEOUT = 0.2 def uds_discovery(E, min_id, max_id, blacklist_args, auto_blacklist_duration, delay, verify, print_results=True): """Scans for diagnostics support by brute forcing session control messages to different arbitration IDs. Returns a list of all (client_arb_id, server_arb_id) pairs found. 
:param min_id: start arbitration ID value :param max_id: end arbitration ID value :param blacklist_args: blacklist for arbitration ID values :param auto_blacklist_duration: seconds to scan for interfering arbitration IDs to blacklist automatically :param delay: delay between each message :param verify: whether found arbitration IDs should be verified :param print_results: whether results should be printed to stdout :type min_id: int :type max_id: int :type blacklist_args: [int] :type auto_blacklist_duration: float :type delay: float :type verify: bool :type print_results: bool :return: list of (client_arbitration_id, server_arbitration_id) pairs :rtype [(int, int)] """ # Set defaults #-E为扩展帧 if E: max_id = ARBITRATION_ID_MAX_EXTENDED min_id = ARBITRATION_ID_MIN_EXTENDED elif min_id is None: min_id = ARBITRATION_ID_MIN max_id = ARBITRATION_ID_MAX if auto_blacklist_duration is None: auto_blacklist_duration = 0 if blacklist_args is None: blacklist_args = [] # Sanity checks if max_id < min_id: raise ValueError("max_id must not be smaller than min_id -" " got min:0x{0:x}, max:0x{1:x}".format(min_id, max_id)) if auto_blacklist_duration < 0: raise ValueError("auto_blacklist_duration must not be smaller " "than 0, got {0}'".format(auto_blacklist_duration)) diagnostic_session_control = Services.DiagnosticSessionControl service_id = diagnostic_session_control.service_id sub_function = diagnostic_session_control.DiagnosticSessionType.DEFAULT_SESSION session_control_data = [service_id, sub_function] valid_session_control_responses = [0x50, 0x7F] def is_valid_response(message): return (len(message.data) >= 2 and message.data[1] in valid_session_control_responses) found_arbitration_ids = []
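The discovery loop in the cropped code above probes each arbitration ID with a DiagnosticSessionControl request (service 0x10, defaultSession 0x01) and accepts a reply whose second byte is either the positive response 0x50 (0x10 + 0x40) or the negative-response marker 0x7F. A minimal sketch of that acceptance test, using plain byte lists in place of python-can messages (the probe bytes shown assume the ISO-TP single-frame wrapping described earlier):

SESSION_CONTROL_PROBE = [0x02, 0x10, 0x01]   # SF length 2, service 0x10, defaultSession 0x01

def looks_like_diagnostic_response(frame_data):
    # Mirrors is_valid_response(): byte 1 must be 0x50 (0x10 + 0x40) or 0x7F (negative response).
    return len(frame_data) >= 2 and frame_data[1] in (0x50, 0x7F)

print(looks_like_diagnostic_response([0x06, 0x50, 0x01, 0x00, 0x32, 0x01, 0xF4]))  # True  (positive)
print(looks_like_diagnostic_response([0x03, 0x7F, 0x10, 0x78]))                    # True  (negative, response pending)
print(looks_like_diagnostic_response([0x02, 0x41, 0x00]))                          # False (not a UDS session response)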
''' module_template.py This file contains a template for a simple CaringCaribou module. The module's entry point is the 'module_main' function. Steps to add this module to CaringCaribou and run it: 1. Copy this template into the `caringcaribou/modules` directory: $ cp module_template.py my_module.py 2. In `setup.py`, add an entry under `caringcaribou.modules`, referencing your new module like: `my_module = caringcaribou.modules.my_module` 3. Run: `setup.py install` 4. Verify that the module is available, it should be listed in the output of `cc.py -h` 5. Run the following command to run module and show usage instructions: $ cc.py my_module -h ''' from __future__ import print_function # Handle large ranges efficiently in both python 2 and 3 if version_info[0] == 2: range = xrange UDS_SERVICE_NAMES = { 0x10: "DIAGNOSTIC_SESSION_CONTROL", 0x11: "ECU_RESET", 0x14: "CLEAR_DIAGNOSTIC_INFORMATION", 0x19: "READ_DTC_INFORMATION", 0x20: "RETURN_TO_NORMAL", 0x22: "READ_DATA_BY_IDENTIFIER", 0x23: "READ_MEMORY_BY_ADDRESS", 0x24: "READ_SCALING_DATA_BY_IDENTIFIER", 0x27: "SECURITY_ACCESS", 0x28: "COMMUNICATION_CONTROL", 0x2A: "READ_DATA_BY_PERIODIC_IDENTIFIER", 0x2C: "DYNAMICALLY_DEFINE_DATA_IDENTIFIER", 0x2D: "DEFINE_PID_BY_MEMORY_ADDRESS", 0x2E: "WRITE_DATA_BY_IDENTIFIER", 0x2F: "INPUT_OUTPUT_CONTROL_BY_IDENTIFIER", 0x31: "ROUTINE_CONTROL", 0x34: "REQUEST_DOWNLOAD", 0x35: "REQUEST_UPLOAD", 0x36: "TRANSFER_DATA", 0x37: "REQUEST_TRANSFER_EXIT", 0x38: "REQUEST_FILE_TRANSFER", 0x3D: "WRITE_MEMORY_BY_ADDRESS", 0x3E: "TESTER_PRESENT", 0x7F: "NEGATIVE_RESPONSE", 0x83: "ACCESS_TIMING_PARAMETER", 0x84: "SECURED_DATA_TRANSMISSION", 0x85: "CONTROL_DTC_SETTING", 0x86: "RESPONSE_ON_EVENT", 0x87: "LINK_CONTROL" } NRC_NAMES = { 0x00: "POSITIVE_RESPONSE", 0x10: "GENERAL_REJECT", 0x11: "SERVICE_NOT_SUPPORTED", 0x12: "SUB_FUNCTION_NOT_SUPPORTED", 0x13: "INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT", 0x14: "RESPONSE_TOO_LONG", 0x21: "BUSY_REPEAT_REQUEST", 0x22: "CONDITIONS_NOT_CORRECT", 0x24: "REQUEST_SEQUENCE_ERROR", 0x25: "NO_RESPONSE_FROM_SUBNET_COMPONENT", 0x26: "FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION", 0x31: "REQUEST_OUT_OF_RANGE", 0x33: "SECURITY_ACCESS_DENIED", 0x35: "INVALID_KEY", 0x36: "EXCEEDED_NUMBER_OF_ATTEMPTS", 0x37: "REQUIRED_TIME_DELAY_NOT_EXPIRED", 0x70: "UPLOAD_DOWNLOAD_NOT_ACCEPTED", 0x71: "TRANSFER_DATA_SUSPENDED", 0x72: "GENERAL_PROGRAMMING_FAILURE", 0x73: "WRONG_BLOCK_SEQUENCE_COUNTER", 0x78: "REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING", 0x7E: "SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION", 0x7F: "SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION", 0x81: "RPM_TOO_HIGH", 0x82: "RPM_TOO_LOW", 0x83: "ENGINE_IS_RUNNING", 0x84: "ENGINE_IS_NOT_RUNNING", 0x85: "ENGINE_RUN_TIME_TOO_LOW", 0x86: "TEMPERATURE_TOO_HIGH", 0x87: "TEMPERATURE_TOO_LOW", 0x88: "VEHICLE_SPEED_TOO_HIGH", 0x89: "VEHICLE_SPEED_TOO_LOW", 0x8A: "THROTTLE_PEDAL_TOO_HIGH", 0x8B: "THROTTLE_PEDAL_TOO_LOW", 0x8C: "TRANSMISSION_RANGE_NOT_IN_NEUTRAL", 0x8D: "TRANSMISSION_RANGE_NOT_IN_GEAR", 0x8F: "BRAKE_SWITCHES_NOT_CLOSED", 0x90: "SHIFT_LEVER_NOT_IN_PARK", 0x91: "TORQUE_CONVERTER_CLUTCH_LOCKED", 0x92: "VOLTAGE_TOO_HIGH", 0x93: "VOLTAGE_TOO_LOW" } DELAY_DISCOVERY = 0.01 DELAY_TESTER_PRESENT = 0.5 DELAY_SECSEED_RESET = 0.01 TIMEOUT_SERVICES = 0.2 TIMEOUT_SUBSERVICES = 0.02 # Max number of arbitration IDs to backtrack during verification VERIFICATION_BACKTRACK = 5 # Extra time in seconds to wait for responses during verification VERIFICATION_EXTRA_DELAY = 0.5 BYTE_MIN = 0x00 BYTE_MAX = 0xFF DUMP_DID_MIN = 0x0000 DUMP_DID_MAX = 0xFFFF DUMP_DID_TIMEOUT 
= 0.2 def uds_discovery(E, min_id, max_id, blacklist_args, auto_blacklist_duration, delay, verify, print_results=True): """Scans for diagnostics support by brute forcing session control messages to different arbitration IDs. Returns a list of all (client_arb_id, server_arb_id) pairs found. :param min_id: start arbitration ID value :param max_id: end arbitration ID value :param blacklist_args: blacklist for arbitration ID values :param auto_blacklist_duration: seconds to scan for interfering arbitration IDs to blacklist automatically :param delay: delay between each message :param verify: whether found arbitration IDs should be verified :param print_results: whether results should be printed to stdout :type min_id: int :type max_id: int :type blacklist_args: [int] :type auto_blacklist_duration: float :type delay: float :type verify: bool :type print_results: bool :return: list of (client_arbitration_id, server_arbitration_id) pairs :rtype [(int, int)] """ # Set defaults #-E为扩展帧 if E: max_id = ARBITRATION_ID_MAX_EXTENDED min_id = ARBITRATION_ID_MIN_EXTENDED elif min_id is None: min_id = ARBITRATION_ID_MIN max_id = ARBITRATION_ID_MAX if auto_blacklist_duration is None: auto_blacklist_duration = 0 if blacklist_args is None: blacklist_args = [] # Sanity checks if max_id < min_id: raise ValueError("max_id must not be smaller than min_id -" " got min:0x{0:x}, max:0x{1:x}".format(min_id, max_id)) if auto_blacklist_duration < 0: raise ValueError("auto_blacklist_duration must not be smaller " "than 0, got {0}'".format(auto_blacklist_duration)) diagnostic_session_control = Services.DiagnosticSessionControl service_id = diagnostic_session_control.service_id sub_function = diagnostic_session_control.DiagnosticSessionType.DEFAULT_SESSION session_control_data = [service_id, sub_function] valid_session_control_responses = [0x50, 0x7F] def is_valid_response(message): return (len(message.data) >= 2 and message.data[1] in valid_session_control_responses) found_arbitration_ids = []
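The UDS_SERVICE_NAMES and NRC_NAMES tables in the full module above exist to turn raw negative responses into readable text. A short decoder in the same spirit is sketched below; the trimmed dictionaries only cover the codes used in the example and are not the module's own tables.

SERVICE_NAMES_SUBSET = {0x10: "DIAGNOSTIC_SESSION_CONTROL", 0x27: "SECURITY_ACCESS"}
NRC_NAMES_SUBSET = {0x35: "INVALID_KEY", 0x78: "REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING"}

def describe_negative_response(data):
    # UDS negative response layout: [0x7F, <rejected service id>, <negative response code>]
    if len(data) < 3 or data[0] != 0x7F:
        return "not a negative response"
    service = SERVICE_NAMES_SUBSET.get(data[1], "service 0x{0:02x}".format(data[1]))
    reason = NRC_NAMES_SUBSET.get(data[2], "NRC 0x{0:02x}".format(data[2]))
    return "{0} rejected: {1}".format(service, reason)

print(describe_negative_response([0x7F, 0x27, 0x35]))   # SECURITY_ACCESS rejected: INVALID_KEY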
with IsoTp(None, None) as tp:
7
2023-11-13 05:05:46+00:00
16k
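The gold next line for this record, "with IsoTp(None, None) as tp:", relies on IsoTp being a context manager whose __exit__ shuts the bus down when the scan leaves the with-block. A dependency-free sketch of that pattern follows; DummyBus and TpSketch are illustrative stand-ins, not caringcaribou classes.

class DummyBus(object):
    def shutdown(self):
        print("bus shut down")

class TpSketch(object):
    # Mirrors the __enter__/__exit__ pair in the IsoTp snippet: leaving the
    # with-block always shuts the bus down, even if the scan body raised.
    def __init__(self, bus):
        self.bus = bus
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.bus.shutdown()

with TpSketch(DummyBus()) as tp:
    print("scanning arbitration IDs...")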
L1bra1/WeakMotion
train_WeakMotionNet.py
[ { "identifier": "WeakMotionNet", "path": "weak_model.py", "snippet": "class WeakMotionNet(nn.Module):\n def __init__(self, out_seq_len=1, FGBG_category_num=2, height_feat_size=13):\n super(WeakMotionNet, self).__init__()\n self.out_seq_len = out_seq_len\n\n self.motion_pred = MotionPrediction(seq_len=self.out_seq_len)\n self.FGBG_classify = FGBGEstimation(motion_category_num=FGBG_category_num)\n self.stpn = STPN(height_feat_size=height_feat_size)\n\n\n def forward(self, bevs):\n bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)\n\n # Backbone network\n x = self.stpn(bevs)\n\n # FG/BG segmentation head\n FGBG_class_pred = self.FGBG_classify(x)\n\n # Motion Displacement prediction\n disp = self.motion_pred(x)\n disp = disp.view(-1, 2, x.size(-2), x.size(-1))\n\n return disp, FGBG_class_pred" }, { "identifier": "DatasetSingleSeq_Stage2", "path": "data/weak_nuscenes_dataloader.py", "snippet": "class DatasetSingleSeq_Stage2(Dataset):\n \"\"\"\n Generate the nuScenes training dataset for Stage2\n\n Parameters\n ----------\n dataset_root : Path to input data root directory\n weakdata_root: Path to weak supervision data root directory\n FBdata_root: Path to FG/BG masks predicted by PreSegNet in Stage1\n split : [train/val/test]\n annotation_ratio: Desired FG/BG annotation ratio. Should be consistent with the ratio in Stage1\n num_points_seg: Desired number of points in the current frame. Will be used to train the FG/BG segmentation head\n num_points_motion: Desired number of FG points in the three frames. Will be used for Chamfer loss\n \"\"\"\n def __init__(self, dataset_root=None, weakdata_root=None, FBdata_root=None, split='train', future_frame_skip=0, voxel_size=(0.25, 0.25, 0.4),\n area_extents=np.array([[-32., 32.], [-32., 32.], [-3., 2.]]), dims=(256, 256, 13), num_category=5,\n annotation_ratio=1.0, num_points_seg = 30000, num_points_motion = 12000):\n\n if dataset_root is None:\n raise ValueError(\"The {} dataset root is None. 
Should specify its value.\".format(split))\n\n self.dataset_root = dataset_root\n print(\"data root:\", dataset_root)\n self.weakdata_root = weakdata_root\n self.FBdata_root = FBdata_root\n\n seq_dirs = []\n if split == 'train':\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d) + '/0.npy'\n seq_dirs.append(tmp_0)\n else:\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d) + '/0.npy'\n seq_dirs.append(tmp_0)\n\n self.seq_files = seq_dirs\n self.num_sample_seqs = len(self.seq_files)\n print(\"The number of {} sequences: {}\".format(split, self.num_sample_seqs))\n\n # For training, the size of dataset should be 17065 * 1; for validation: 1719; for testing: 4309\n if split == 'train' and self.num_sample_seqs != 17065:\n warnings.warn(\">> The size of training dataset is not 17065 * 2.\\n\")\n elif split == 'val' and self.num_sample_seqs != 1719:\n warnings.warn(\">> The size of validation dataset is not 1719.\\n\")\n elif split == 'test' and self.num_sample_seqs != 4309:\n warnings.warn('>> The size of test dataset is not 4309.\\n')\n\n self.split = split\n self.voxel_size = voxel_size\n self.area_extents = area_extents\n self.future_frame_skip = future_frame_skip\n self.dims = dims\n self.annotation_ratio = annotation_ratio\n self.num_points_seg = num_points_seg\n self.num_points_motion = num_points_motion\n self.num_category = num_category\n\n def __len__(self):\n return self.num_sample_seqs\n\n def sample_foreground_point(self, pc, FGBG_label, use_GT_label=True):\n pc, not_close = remove_close(pc, radius=1.0)\n pc, filter_idx = filter_pc(pc, extents=self.area_extents)\n\n if use_GT_label:\n FGBG_label = FGBG_label[not_close]\n FGBG_label = FGBG_label[filter_idx]\n FG_mask = FGBG_label == 2\n else:\n FG_mask = FGBG_label\n\n FG_point = pc[FG_mask]\n FG_point_num = FG_point.shape[0]\n\n if FG_point_num != 0:\n if FG_point_num >= self.num_points_motion:\n sample_idx = np.random.choice(FG_point_num, self.num_points_motion, replace=False)\n FG_point_num = self.num_points_motion\n else:\n sample_idx = np.concatenate((np.arange(FG_point_num),\n np.random.choice(FG_point_num, self.num_points_motion - FG_point_num, replace=True)), axis=-1)\n FG_point = FG_point[sample_idx]\n else:\n FG_point = np.zeros((self.num_points_motion, 3))\n\n return FG_point, FG_point_num\n\n def __getitem__(self, idx):\n seq_file = self.seq_files[idx]\n gt_data_handle = np.load(seq_file, allow_pickle=True)\n gt_dict = gt_data_handle.item()\n\n dims = gt_dict['3d_dimension']\n num_future_pcs = gt_dict['num_future_pcs']\n num_past_pcs = gt_dict['num_past_pcs']\n pixel_indices = gt_dict['pixel_indices']\n\n sparse_disp_field_gt = gt_dict['disp_field']\n all_disp_field_gt = np.zeros((num_future_pcs, dims[0], dims[1], 2), dtype=np.float32)\n all_disp_field_gt[:, pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_disp_field_gt[:]\n\n sparse_valid_pixel_maps = gt_dict['valid_pixel_map']\n all_valid_pixel_maps = np.zeros((num_future_pcs, dims[0], dims[1]), dtype=np.float32)\n all_valid_pixel_maps[:, pixel_indices[:, 0], pixel_indices[:, 1]] = sparse_valid_pixel_maps[:]\n\n sparse_pixel_cat_maps = gt_dict['pixel_cat_map']\n pixel_cat_map = np.zeros((dims[0], dims[1], self.num_category), dtype=np.float32)\n pixel_cat_map[pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_pixel_cat_maps[:]\n\n non_empty_map = np.zeros((dims[0], dims[1]), dtype=np.float32)\n non_empty_map[pixel_indices[:, 0], pixel_indices[:, 1]] = 1.0\n\n padded_voxel_points = list()\n for 
i in range(num_past_pcs):\n indices = gt_dict['voxel_indices_' + str(i)]\n curr_voxels = np.zeros(dims, dtype=np.bool)\n curr_voxels[indices[:, 0], indices[:, 1], indices[:, 2]] = 1\n padded_voxel_points.append(curr_voxels)\n padded_voxel_points = np.stack(padded_voxel_points, 0).astype(np.float32)\n\n # get weak supervision\n if self.split == 'train':\n scene_name = seq_file.split('/')[-2]\n weak_file_name = os.path.join(os.path.join(self.weakdata_root, scene_name), '0.npy')\n weak_data_handle = np.load(weak_file_name, allow_pickle=True)\n weak_dict = weak_data_handle.item()\n\n # get FG/BG annotations for the current frame,\n # this procedure is the same as the data preparation in Stage1\n # 0: past frame; 1: current frame; 2: future frame\n\n pc_seg = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_seg = weak_dict['points_label_1']\n FGBG_gt_mask_seg = convert_semantic_to_FGBG(label_seg[:, 0])\n sample_idx = weak_dict['sample_idx_1']\n\n selected_num = np.floor(self.annotation_ratio * len(sample_idx)).astype(np.int64)\n selected_sample_idx = sample_idx[:selected_num]\n\n annotation_mask = np.zeros(len(sample_idx), dtype=np.float32)\n annotation_mask[selected_sample_idx] = 1 # 0: point without annotation; 1: point with annotation\n FGBG_gt_mask_seg[annotation_mask == 0] = 3 # 1: Background; 2: Foreground; 3: Unlabelled\n\n pc_seg, not_close = remove_close(pc_seg, radius=1.0)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[not_close]\n pc_seg, filter_idx = filter_pc(pc_seg, extents=self.area_extents)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[filter_idx]\n\n curr_seg_num = pc_seg.shape[0]\n if curr_seg_num >= self.num_points_seg:\n pc_sample_idx = np.random.choice(curr_seg_num, self.num_points_seg, replace=False)\n curr_seg_num = self.num_points_seg\n else:\n pc_sample_idx = np.concatenate((np.arange(curr_seg_num),\n np.random.choice(curr_seg_num, self.num_points_seg - curr_seg_num, replace=True)), axis=-1)\n point_FGBG_gt_mask_seg = FGBG_gt_mask_seg[pc_sample_idx]\n pc_seg = pc_seg[pc_sample_idx]\n\n # get foreground points in three frames for chamfer loss\n if self.annotation_ratio ==1:\n # When using full annotations, we directly extract ground truth foreground points for chamfer loss\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n label_0 = weak_dict['points_label_0']\n FGBG_gt_mask_0 = convert_semantic_to_FGBG(label_0[:, 0]) # 1: Background; 2: Foreground\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, FGBG_gt_mask_0)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_1 = weak_dict['points_label_1']\n FGBG_gt_mask_1 = convert_semantic_to_FGBG(label_1[:, 0])\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, FGBG_gt_mask_1)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n label_2 = weak_dict['points_label_2']\n FGBG_gt_mask_2 = convert_semantic_to_FGBG(label_2[:, 0])\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, FGBG_gt_mask_2)\n else:\n # When using partial annotations, we extract foreground points predicted by PreSegNet for Chamfer loss\n pred_FGBG_file_name = os.path.join(self.FBdata_root, scene_name + '.npz')\n pred_FGBG_data = np.load(pred_FGBG_file_name)\n\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n pred_FGBG_0 = pred_FGBG_data['pred_0']\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, pred_FGBG_0, use_GT_label=False)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n pred_FGBG_1 = pred_FGBG_data['pred_1']\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, pred_FGBG_1, 
use_GT_label=False)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n pred_FGBG_2 = pred_FGBG_data['pred_2']\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, pred_FGBG_2, use_GT_label=False)\n\n else:\n pc_seg = np.zeros(1)\n point_FGBG_gt_mask_seg = np.zeros(1)\n curr_seg_num = np.zeros(1)\n FG_point_0 = np.zeros(1)\n FG_point_num_0 = np.zeros(1)\n FG_point_1 = np.zeros(1)\n FG_point_num_1 = np.zeros(1)\n FG_point_2 = np.zeros(1)\n FG_point_num_2 = np.zeros(1)\n\n return padded_voxel_points, all_disp_field_gt, pixel_cat_map, \\\n non_empty_map, all_valid_pixel_maps, num_future_pcs, \\\n pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \\\n FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2" }, { "identifier": "DatasetSingleSeq_Stage2", "path": "data/weak_waymo_dataloader.py", "snippet": "class DatasetSingleSeq_Stage2(Dataset):\n \"\"\"\n Generate the Waymo training dataset for Stage2\n\n Parameters\n ----------\n dataset_root : Path to input data root directory\n weakdata_root: Path to weak supervision data root directory\n FBdata_root: Path to FG/BG masks predicted by PreSegNet in Stage1\n split : [train/val]\n annotation_ratio: Desired FG/BG annotation ratio. Should be consistent with the ratio in Stage1\n num_points_seg: Desired number of points in the current frame. Will be used to train the FG/BG segmentation head\n num_points_motion: Desired number of FG points in the three frames. Will be used for Chamfer loss\n \"\"\"\n def __init__(self, dataset_root=None, weakdata_root=None, FBdata_root=None, split='train', future_frame_skip=0, voxel_size=(0.25, 0.25, 0.4),\n area_extents=np.array([[-32., 32.], [-32., 32.], [-1., 4.]]), dims=(256, 256, 13), num_category=5,\n annotation_ratio=1.0, num_points_seg = 40000, num_points_motion = 12000):\n\n if dataset_root is None:\n raise ValueError(\"The {} dataset root is None. 
Should specify its value.\".format(split))\n\n self.dataset_root = dataset_root\n print(\"data root:\", dataset_root)\n self.weakdata_root = weakdata_root\n self.FBdata_root = FBdata_root\n\n seq_dirs = []\n if split == 'train':\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d)\n seq_dirs.append(tmp_0)\n else:\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d)\n seq_dirs.append(tmp_0)\n\n self.seq_files = seq_dirs\n self.num_sample_seqs = len(self.seq_files)\n print(\"The number of {} sequences: {}\".format(split, self.num_sample_seqs))\n\n # For training, the size of dataset should be 14351; for validation/testing: 3634\n if split == 'train' and self.num_sample_seqs != 14351:\n warnings.warn(\">> The size of training dataset is not 14351.\\n\")\n elif split == 'val' and self.num_sample_seqs != 3634:\n warnings.warn(\">> The size of validation dataset is not 3634.\\n\")\n\n\n self.split = split\n self.voxel_size = voxel_size\n self.area_extents = area_extents\n self.future_frame_skip = future_frame_skip\n self.dims = dims\n self.annotation_ratio = annotation_ratio\n self.num_points_seg = num_points_seg\n self.num_points_motion = num_points_motion\n self.num_category = num_category\n\n def __len__(self):\n return self.num_sample_seqs\n\n def sample_foreground_point(self, pc, FGBG_label, use_GT_label=True):\n pc, not_close = remove_close(pc, radius=1.0)\n pc, filter_idx = filter_pc(pc, extents=self.area_extents)\n\n if use_GT_label:\n FGBG_label = FGBG_label[not_close]\n FGBG_label = FGBG_label[filter_idx]\n FG_mask = FGBG_label == 2\n else:\n FG_mask = FGBG_label\n\n FG_point = pc[FG_mask]\n FG_point_num = FG_point.shape[0]\n\n if FG_point_num != 0:\n if FG_point_num >= self.num_points_motion:\n sample_idx = np.random.choice(FG_point_num, self.num_points_motion, replace=False)\n FG_point_num = self.num_points_motion\n else:\n sample_idx = np.concatenate((np.arange(FG_point_num),\n np.random.choice(FG_point_num, self.num_points_motion - FG_point_num, replace=True)), axis=-1)\n FG_point = FG_point[sample_idx]\n else:\n FG_point = np.zeros((self.num_points_motion, 3))\n\n return FG_point, FG_point_num\n\n def __getitem__(self, idx):\n seq_file = self.seq_files[idx]\n gt_data_handle = np.load(seq_file, allow_pickle=True)\n gt_dict = gt_data_handle.item()\n\n dims = gt_dict['3d_dimension']\n num_future_pcs = gt_dict['num_future_pcs']\n num_past_pcs = gt_dict['num_past_pcs']\n pixel_indices = gt_dict['pixel_indices']\n\n sparse_disp_field_gt = gt_dict['disp_field']\n all_disp_field_gt = np.zeros((num_future_pcs, dims[0], dims[1], 2), dtype=np.float32)\n all_disp_field_gt[:, pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_disp_field_gt[:]\n\n sparse_valid_pixel_maps = gt_dict['valid_pixel_map']\n all_valid_pixel_maps = np.zeros((num_future_pcs, dims[0], dims[1]), dtype=np.float32)\n all_valid_pixel_maps[:, pixel_indices[:, 0], pixel_indices[:, 1]] = sparse_valid_pixel_maps[:]\n\n sparse_pixel_cat_maps = gt_dict['pixel_cat_map']\n pixel_cat_map = np.zeros((dims[0], dims[1], self.num_category), dtype=np.float32)\n pixel_cat_map[pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_pixel_cat_maps[:]\n\n non_empty_map = np.zeros((dims[0], dims[1]), dtype=np.float32)\n non_empty_map[pixel_indices[:, 0], pixel_indices[:, 1]] = 1.0\n\n padded_voxel_points = list()\n for i in range(num_past_pcs):\n indices = gt_dict['voxel_indices_' + str(i)]\n curr_voxels = np.zeros(dims, dtype=np.bool)\n curr_voxels[indices[:, 0], indices[:, 
1], indices[:, 2]] = 1\n padded_voxel_points.append(curr_voxels)\n padded_voxel_points = np.stack(padded_voxel_points, 0).astype(np.float32)\n\n # get weak supervision\n if self.split == 'train':\n scene_name = seq_file.split('/')[-1]\n weak_file_name = os.path.join(self.weakdata_root, scene_name)\n weak_data_handle = np.load(weak_file_name, allow_pickle=True)\n weak_dict = weak_data_handle.item()\n\n # get FG/BG annotations for the current frame,\n # this procedure is the same as the data preparation in Stage1\n # 0: past frame; 1: current frame; 2: future frame\n\n pc_seg = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_seg = weak_dict['points_label_1']\n FGBG_gt_mask_seg = convert_semantic_to_FGBG_waymo(label_seg[:, 0])\n sample_idx = weak_dict['sample_idx_1']\n\n selected_num = np.floor(self.annotation_ratio * len(sample_idx)).astype(np.int64)\n selected_sample_idx = sample_idx[:selected_num]\n\n annotation_mask = np.zeros(len(sample_idx), dtype=np.float32)\n annotation_mask[selected_sample_idx] = 1 # 0: point without annotation; 1: point with annotation\n FGBG_gt_mask_seg[annotation_mask == 0] = 3 # 1: Background; 2: Foreground; 3: Unlabelled\n\n pc_seg, not_close = remove_close(pc_seg, radius=1.0)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[not_close]\n pc_seg, filter_idx = filter_pc(pc_seg, extents=self.area_extents)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[filter_idx]\n\n curr_seg_num = pc_seg.shape[0]\n if curr_seg_num >= self.num_points_seg:\n pc_sample_idx = np.random.choice(curr_seg_num, self.num_points_seg, replace=False)\n curr_seg_num = self.num_points_seg\n else:\n pc_sample_idx = np.concatenate((np.arange(curr_seg_num),\n np.random.choice(curr_seg_num, self.num_points_seg - curr_seg_num, replace=True)), axis=-1)\n point_FGBG_gt_mask_seg = FGBG_gt_mask_seg[pc_sample_idx]\n pc_seg = pc_seg[pc_sample_idx]\n\n # get foreground points in three frames for chamfer loss\n if self.annotation_ratio ==1:\n # When using full annotations, we directly extract ground truth foreground points for chamfer loss\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n label_0 = weak_dict['points_label_0']\n FGBG_gt_mask_0 = convert_semantic_to_FGBG_waymo(label_0[:, 0]) # 1: Background; 2: Foreground\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, FGBG_gt_mask_0)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_1 = weak_dict['points_label_1']\n FGBG_gt_mask_1 = convert_semantic_to_FGBG_waymo(label_1[:, 0])\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, FGBG_gt_mask_1)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n label_2 = weak_dict['points_label_2']\n FGBG_gt_mask_2 = convert_semantic_to_FGBG_waymo(label_2[:, 0])\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, FGBG_gt_mask_2)\n else:\n # When using partial annotations, we extract foreground points predicted by PreSegNet for Chamfer loss\n pred_FGBG_file_name = os.path.join(self.FBdata_root, scene_name.split('.')[0] + '.npz')\n pred_FGBG_data = np.load(pred_FGBG_file_name)\n\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n pred_FGBG_0 = pred_FGBG_data['pred_0']\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, pred_FGBG_0, use_GT_label=False)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n pred_FGBG_1 = pred_FGBG_data['pred_1']\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, pred_FGBG_1, use_GT_label=False)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n pred_FGBG_2 = pred_FGBG_data['pred_2']\n FG_point_2, FG_point_num_2 = 
self.sample_foreground_point(pc_2, pred_FGBG_2, use_GT_label=False)\n\n else:\n pc_seg = np.zeros(1)\n point_FGBG_gt_mask_seg = np.zeros(1)\n curr_seg_num = np.zeros(1)\n FG_point_0 = np.zeros(1)\n FG_point_num_0 = np.zeros(1)\n FG_point_1 = np.zeros(1)\n FG_point_num_1 = np.zeros(1)\n FG_point_2 = np.zeros(1)\n FG_point_num_2 = np.zeros(1)\n\n return padded_voxel_points, all_disp_field_gt, pixel_cat_map, \\\n non_empty_map, all_valid_pixel_maps, num_future_pcs, \\\n pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \\\n FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2" }, { "identifier": "FGBG_seg_loss", "path": "loss_utils.py", "snippet": "def FGBG_seg_loss(FGBG_pred, point_FGBG_gt_mask, source_pc, source_num, voxel_size, area_extents):\n \"\"\"\n Foreground Background segmentation loss\n ----------\n\n Inputs:\n FGBG_pred: [B, 2, dim_0, dim_1], predicted Foreground/Background BEV map\n point_FGBG_gt_mask: [B, N], per-point Foreground/Background ground truth, (1: BG, 2: FG, 3: Unannotated)\n source_pc: [B, N, 3], point cloud in current frame\n source_num: [B], unrepeated point number in each sample\n voxel_size, area_extents: voxel size and range of area,\n \"\"\"\n\n batch_size = FGBG_pred.shape[0]\n device = FGBG_pred.device\n\n loss_FGBG_seg = torch.zeros((1), device=device, dtype=FGBG_pred.dtype)\n\n for batch_index in range(batch_size):\n\n # get current batch\n curr_source_num = source_num[batch_index]\n curr_source_pc_np = source_pc[batch_index, :curr_source_num, :].numpy()\n curr_point_FGBG_gt_mask = point_FGBG_gt_mask[batch_index, :curr_source_num].float().to(device) # 1: Background; 2: Foreground; 3: Unannotated\n curr_FGBG_pred = FGBG_pred[batch_index]\n\n # generate FGBG ground truth and weight for each point\n curr_point_BG_gt_mask = (curr_point_FGBG_gt_mask == 1).float().unsqueeze(0)\n curr_point_FG_gt_mask = (curr_point_FGBG_gt_mask == 2).float().unsqueeze(0)\n\n curr_point_FGBG_gt_map = torch.cat([curr_point_BG_gt_mask, curr_point_FG_gt_mask], 0).permute(1, 0)\n\n # weight assigned to different categories. 
0.005 for BG; 1.0 for FG; 0.0 for unlabelled\n curr_FGBG_weight_map = (curr_point_BG_gt_mask * 0.005 + curr_point_FG_gt_mask * 1.0).squeeze(0)\n curr_annotated_point_num = torch.sum((curr_point_FGBG_gt_mask != 3).float())\n\n # get FGBG prediction for each point\n curr_voxel_indices = gen_voxel_indices_for_pc(curr_source_pc_np, voxel_size, area_extents)\n curr_point_FGBG_pred = curr_FGBG_pred[:, curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]].permute(1, 0)\n\n # compute current loss\n curr_log_softmax_FGBG_pred = F.log_softmax(curr_point_FGBG_pred, dim=1)\n curr_loss_FGBG_pred = torch.sum(- curr_point_FGBG_gt_map * curr_log_softmax_FGBG_pred, dim=1) * curr_FGBG_weight_map\n curr_loss_FGBG_predd = torch.sum(curr_loss_FGBG_pred) / (curr_annotated_point_num + 1e-6)\n\n # accumulate loss\n loss_FGBG_seg = loss_FGBG_seg + curr_loss_FGBG_predd\n\n loss_FGBG_seg = loss_FGBG_seg / batch_size\n return loss_FGBG_seg" }, { "identifier": "CCD_loss", "path": "loss_utils.py", "snippet": "def CCD_loss(disp_pred, pc_0, pc_0_num, pc_1, pc_1_num, pc_2, pc_2_num, non_empty_map, voxel_size, area_extents,\n epoch, epoch_threshold=10, theta2=1):\n \"\"\"\n Consistency-aware Chamfer Distance loss\n ----------\n\n Inputs:\n disp_pred: [B, 2, dim_0, dim_1], predicted 2D displacement BEV map\n\n pc_0: [B, M, 3], predicted foreground points in the past frame (-0.5s)\n pc_0_num: [B], unrepeated foreground point number in each past frame\n\n pc_1: [B, M, 3], predicted foreground points in the current frame (0s)\n pc_1_num: [B], unrepeated foreground point number in each current frame\n\n pc_2: [B, M, 3], predicted foreground points in the future frame (+0.5s)\n pc_2_num: [B], unrepeated foreground point number in each future frame\n\n non_empty_map: [B, dim_0, dim_1] nonempty mask\n voxel_size, area_extents: voxel size and range of area,\n\n epoch: the number of current training epoch\n epoch_threshold: After epoch_threshold epochs, we start to reweight multi-frame Chamfer loss\n theta2: hyper-parameter in Gaussian kernel, used in Eq.(6)\n \"\"\"\n\n batch_size = disp_pred.shape[0]\n device = disp_pred.device\n loss_disp = torch.zeros((1), device=device, dtype=disp_pred.dtype)\n\n valid_sample_num = 0\n for batch_index in range(batch_size):\n\n # 0: past frame; 1: current frame; 2: future frame\n curr_pc_0_num = pc_0_num[batch_index]\n curr_pc_1_num = pc_1_num[batch_index]\n curr_pc_2_num = pc_2_num[batch_index]\n if (curr_pc_0_num > 0) and (curr_pc_1_num > 0) and (curr_pc_2_num > 0):\n valid_sample_num = valid_sample_num + 1\n curr_valid_map = non_empty_map[batch_index]\n\n # get source and target point clouds, predicted 2D BEV flow\n curr_pc_0_np = pc_0[batch_index, :curr_pc_0_num, :].numpy() # target pc, past frame\n curr_pc_1_np = pc_1[batch_index, :curr_pc_1_num, :].numpy() # current pc, source frame\n curr_pc_2_np = pc_2[batch_index, :curr_pc_2_num, :].numpy() # target pc, future frame\n curr_disp_pred = disp_pred[batch_index, :, :, :]\n\n # get predicted 3D flow for each point\n curr_voxel_indices = gen_voxel_indices_for_pc(curr_pc_1_np, voxel_size, area_extents)\n curr_point_disp_pred = curr_disp_pred[:, curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]].permute(1, 0)\n\n # get FG and BG map for the current frame, the map is estimated by the PreSegNet in Stage1\n curr_fg_map = torch.zeros_like(curr_valid_map)\n curr_fg_map[curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]] = 1\n curr_fg_map = curr_fg_map * curr_valid_map\n fg_voxel_num = torch.sum(curr_fg_map)\n\n curr_bg_map = (1 - curr_fg_map) * 
curr_valid_map\n bg_voxel_num = torch.sum(curr_bg_map)\n\n curr_pc_0 = torch.from_numpy(curr_pc_0_np).to(device).float()\n curr_pc_1 = torch.from_numpy(curr_pc_1_np).to(device).float()\n curr_pc_2 = torch.from_numpy(curr_pc_2_np).to(device).float()\n curr_point_3d_disp_pred = torch.cat([curr_point_disp_pred, torch.zeros_like(curr_point_disp_pred[:, 0:1])], -1)\n\n # compute confidence weights for the three point clouds\n if epoch > epoch_threshold:\n # After epoch_threshold epochs, we start to reweight multi-frame Chamfer loss\n weight_P, weight_C, weight_F = gen_confidence_weight(curr_pc_0, curr_pc_1, curr_pc_2, curr_point_3d_disp_pred, theta2=theta2)\n else:\n weight_P, weight_C, weight_F = None, None, None\n\n # Consistency-aware Chamfer Distance loss function for the foreground points\n # backward term (backward warped current frame, past frame)\n warped_source_pc_backward = curr_pc_1 - curr_point_3d_disp_pred\n fg_loss_backward = weighted_chamfer_loss(warped_source_pc_backward, curr_pc_0, weight_C, weight_P)\n\n # forward term (forward warped current frame, future frame)\n warped_source_pc_forward = curr_pc_1 + curr_point_3d_disp_pred\n fg_loss_forward = weighted_chamfer_loss(warped_source_pc_forward, curr_pc_2, weight_C, weight_F)\n\n fg_loss = (fg_loss_backward + fg_loss_forward) / 2.0\n\n # generate loss for the background points. Eq.(13)\n bg_gt = torch.zeros_like(curr_disp_pred) # background points are regarded as static\n bg_loss = torch.sum(torch.abs(curr_disp_pred * curr_bg_map.unsqueeze(0) - bg_gt * curr_bg_map.unsqueeze(0)), 0)\n bg_loss = torch.sum(bg_loss) / (torch.sum(curr_bg_map) + 1e-6)\n\n # combine the losses from the foreground and the background. Eq.(12)\n curr_loss = (fg_loss * fg_voxel_num + 0.005 * bg_loss * bg_voxel_num) \\\n / (fg_voxel_num + bg_voxel_num + 1e-6)\n\n loss_disp = loss_disp + curr_loss\n\n loss_disp = loss_disp / valid_sample_num\n return loss_disp" }, { "identifier": "evaluate_FGBG_prediction", "path": "evaluation_utils.py", "snippet": "def evaluate_FGBG_prediction(FGBG_pred, non_empty_map_numpy, pixel_cat_map_gt_numpy, overall_cls_gt, overall_cls_pred,\n datatype='nuScenes'):\n\n # Convert the category map\n max_prob = np.amax(pixel_cat_map_gt_numpy, axis=-1)\n filter_mask = max_prob == 1.0 # Note: some of the cell probabilities are soft probabilities\n pixel_cat_map_numpy = np.argmax(pixel_cat_map_gt_numpy,\n axis=-1) + 1 # category starts from 1 (background), etc\n\n # Convert category label to FG/BG label\n pixel_FGBG_map_numpy = pixel_cat_map_numpy.copy()\n if datatype == 'nuScenes':\n # 1: background or empty; 2: Vehicle; 3: Ped; 4: Bike; 5: Others\n pixel_FGBG_map_numpy[pixel_FGBG_map_numpy > 1] = 2\n elif datatype == 'Waymo':\n # 1: background or empty; 2: Vehicle; 3: Ped; 4: Cyclist; 5: Sign, regarded as background\n tmp = pixel_FGBG_map_numpy.copy()\n pixel_FGBG_map_numpy[tmp > 1] = 2\n pixel_FGBG_map_numpy[(tmp == 5)] = 1\n\n pixel_FGBG_map_numpy = (pixel_FGBG_map_numpy * non_empty_map_numpy * filter_mask).astype(\n np.int32) # 0: Empty; 1: Background; 2: Foreground\n\n FGBG_pred_numpy = FGBG_pred.cpu().numpy()\n FGBG_pred_numpy = np.transpose(FGBG_pred_numpy, (0, 2, 3, 1))\n FGBG_pred_numpy = np.argmax(FGBG_pred_numpy, axis=-1) + 1\n FGBG_pred_numpy = (FGBG_pred_numpy * non_empty_map_numpy * filter_mask).astype(np.int32)\n\n border = 8\n roi_mask = np.zeros_like(non_empty_map_numpy)\n roi_mask[:, border:-border, border:-border] = 1.0\n\n # For computing confusion matrix, in order to compute FG/BG classification accuracy for each 
category\n count_mask = non_empty_map_numpy * filter_mask * roi_mask\n idx_fg = np.where(count_mask > 0)\n\n overall_cls_gt.append(pixel_FGBG_map_numpy[idx_fg])\n overall_cls_pred.append(FGBG_pred_numpy[idx_fg])\n\n return overall_cls_gt, overall_cls_pred" }, { "identifier": "evaluate_motion_prediction", "path": "evaluation_utils.py", "snippet": "def evaluate_motion_prediction(disp_pred, FGBG_pred, all_disp_field_gt, all_valid_pixel_maps, future_steps,\n distance_intervals, selected_future_sweeps, cell_groups,\n use_FGBG_pred_masking=True, datatype='nuScenes'):\n\n pred_shape = disp_pred.size()\n disp_pred = disp_pred.view(all_disp_field_gt.size(0), -1, pred_shape[-3], pred_shape[-2], pred_shape[-1])\n disp_pred = disp_pred.contiguous()\n disp_pred = disp_pred.cpu().numpy()\n\n if use_FGBG_pred_masking:\n FGBG_pred_numpy = FGBG_pred.cpu().numpy()\n FGBG_pred_numpy = np.argmax(FGBG_pred_numpy, axis=1)\n mask = FGBG_pred_numpy == 0 # predicted Background mask\n\n # For those with very small movements, we consider them as static\n last_pred = disp_pred[:, -1, :, :, :]\n last_pred_norm = np.linalg.norm(last_pred, ord=2, axis=1) # out: (batch, h, w)\n thd_mask = last_pred_norm <= 0.2\n\n cat_weight_map = np.ones_like(FGBG_pred_numpy, dtype=np.float32)\n cat_weight_map[mask] = 0.0\n cat_weight_map[thd_mask] = 0.0\n cat_weight_map = cat_weight_map[:, np.newaxis, np.newaxis, ...] # (batch, 1, 1, h, w)\n\n disp_pred = disp_pred * cat_weight_map # small motion, static, background\n\n\n # Pre-processing\n all_disp_field_gt = all_disp_field_gt.numpy() # (bs, seq, h, w, channel)\n future_steps = future_steps.numpy()[0]\n\n valid_pixel_maps = all_valid_pixel_maps[:, -future_steps:, ...].contiguous()\n valid_pixel_maps = valid_pixel_maps.numpy()\n\n all_disp_field_gt = all_disp_field_gt[:, -future_steps:, ]\n all_disp_field_gt = np.transpose(all_disp_field_gt, (0, 1, 4, 2, 3))\n all_disp_field_gt_norm = np.linalg.norm(all_disp_field_gt, ord=2, axis=2)\n\n upper_thresh = 0.2\n if datatype == 'nuScenes':\n upper_bound = 1 / 20 * upper_thresh\n elif datatype == 'Waymo':\n upper_bound = 1 / 10 * upper_thresh\n\n static_cell_mask = all_disp_field_gt_norm <= upper_bound\n static_cell_mask = np.all(static_cell_mask, axis=1) # along the temporal axis\n moving_cell_mask = np.logical_not(static_cell_mask)\n\n for j, d in enumerate(distance_intervals):\n for slot, s in enumerate((selected_future_sweeps - 1)): # selected_future_sweeps: [4, 8, ...]\n curr_valid_pixel_map = valid_pixel_maps[:, s]\n\n if j == 0: # corresponds to static cells\n curr_mask = np.logical_and(curr_valid_pixel_map, static_cell_mask)\n else:\n # We use the displacement between keyframe and the last sample frame as metrics\n last_gt_norm = all_disp_field_gt_norm[:, -1]\n mask = np.logical_and(d[0] <= last_gt_norm, last_gt_norm < d[1])\n\n curr_mask = np.logical_and(curr_valid_pixel_map, mask)\n curr_mask = np.logical_and(curr_mask, moving_cell_mask)\n\n # we evaluate the performance for cells within the range [-30m, 30m] along both x, y dimensions.\n border = 8\n roi_mask = np.zeros_like(curr_mask, dtype=np.bool_)\n roi_mask[:, border:-border, border:-border] = True\n curr_mask = np.logical_and(curr_mask, roi_mask)\n\n cell_idx = np.where(curr_mask == True)\n\n gt = all_disp_field_gt[:, s]\n pred = disp_pred[:, -1, :, :, :]\n norm_error = np.linalg.norm(gt - pred, ord=2, axis=1)\n\n cell_groups[j][slot].append(norm_error[cell_idx])\n\n return cell_groups" } ]
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np import time import sys import argparse import os from shutil import copytree, copy from weak_model import WeakMotionNet from data.weak_nuscenes_dataloader import DatasetSingleSeq_Stage2 from data.weak_waymo_dataloader import DatasetSingleSeq_Stage2 as DatasetSingleSeq_Stage2_waymo from sklearn.metrics import confusion_matrix from tqdm import tqdm from loss_utils import FGBG_seg_loss, CCD_loss from evaluation_utils import evaluate_FGBG_prediction, evaluate_motion_prediction
12,365
saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[1:]))) saver.write(args.__repr__() + "\n\n") saver.flush() # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) tmp = args.motiondata trainset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': trainset = DatasetSingleSeq_Stage2(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': trainset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=num_workers) print("Training dataset size:", len(trainset)) tmp = args.evaldata evalset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': evalset = DatasetSingleSeq_Stage2(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': evalset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) evalloader = torch.utils.data.DataLoader(evalset, batch_size=1, shuffle=False, num_workers=num_workers) print("Training dataset size:", len(trainset)) model = WeakMotionNet(out_seq_len=out_seq_len, FGBG_category_num=2, height_feat_size=height_feat_size) model = nn.DataParallel(model) model = model.to(device) optimizer = optim.Adam(model.parameters(), lr=0.0005) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20, 30, 40], gamma=0.5) if args.resume != '': checkpoint = torch.load(args.resume) start_epoch = checkpoint['epoch'] + 1 model.load_state_dict(checkpoint['model_state_dict']) print("Load model from {}, at epoch {}".format(args.resume, start_epoch - 1)) for epoch in range(start_epoch, num_epochs + 1): lr = optimizer.param_groups[0]['lr'] print("Epoch {}, learning rate {}".format(epoch, lr)) if need_log: saver.write("epoch: {}, lr: {}\t".format(epoch, lr)) saver.flush() model.train() loss_FGBG_seg, loss_disp = train(model, trainloader, optimizer, device, epoch, voxel_size, area_extents) model.eval() me_static, me_slow, me_fast, acc_bg, acc_fg = eval(model, evalloader, device) scheduler.step() if need_log: saver.write("loss_FGBG_seg:{}\t loss_disp:{}\n".format(loss_FGBG_seg, loss_disp)) saver.write("me_static:{}\t me_slow:{}\t me_fast:{}\n".format(me_static, me_slow, me_fast)) saver.write("acc_bg:{}\t acc_fg:{}\n".format(acc_bg, acc_fg)) saver.flush() # save model if need_log and (epoch >= 30): save_dict = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'loss_FGBG_seg': loss_FGBG_seg.avg, 'loss_disp': loss_disp.avg} torch.save(save_dict, os.path.join(model_save_path, 'epoch_' + str(epoch) + '_%.3f_%.3f_%.3f_%.3f_%.3f'%(me_static, me_slow, me_fast, 
acc_bg, acc_fg) + '.pth')) if need_log: saver.close() def train(model, trainloader, optimizer, device, epoch, voxel_size, area_extents): running_loss_FGBG_seg = AverageMeter('FGBG_Seg', ':.6f') # for cell FG/BG segmentation error running_loss_disp= AverageMeter('disp', ':.6f') # for cell motion prediction error # for i, data in enumerate(trainloader, 0): for i, data in tqdm(enumerate(trainloader, 0), total=len(trainloader), smoothing=0.9): padded_voxel_points, _, _, \ non_empty_map, _, _, \ pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \ FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2 = data optimizer.zero_grad() # Move to GPU/CPU padded_voxel_points = padded_voxel_points.to(device) non_empty_map = non_empty_map.to(device) # Make prediction disp_pred, FGBG_pred = model(padded_voxel_points) # Compute and back-propagate the losses loss_FGBG_seg = FGBG_seg_loss(FGBG_pred, point_FGBG_gt_mask_seg, pc_seg, curr_seg_num, voxel_size, area_extents)
""" Train WeakMotionNet in Stage2 Some of the code are modified based on 'train_single_seq.py' in MotionNet. Reference: MotionNet (https://www.merl.com/research/?research=license-request&sw=MotionNet) """ class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def check_folder(folder_path): if not os.path.exists(folder_path): os.mkdir(folder_path) return folder_path out_seq_len = 1 # The number of future frames we are going to predict height_feat_size = 13 # The size along the height dimension parser = argparse.ArgumentParser() parser.add_argument('-md', '--motiondata', default='/path_to/nuScenes/input-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-wd', '--weakdata', default='/path_to/nuScenes/weak-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-FBd', '--FBdata', default='/path_to/nuScenes/FGBG-data/nuscenes_seg_0-01/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes']) parser.add_argument('-t', '--evaldata', default='/path_to/nuScenes/input-data/val/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training') parser.add_argument('--batch', default=8, type=int, help='Batch size') parser.add_argument('--nepoch', default=60, type=int, help='Number of epochs') parser.add_argument('--nworker', default=4, type=int, help='Number of workers') parser.add_argument('--log', default=True, action='store_true', help='Whether to log') parser.add_argument('--logpath', default='', help='The path to the output log file') parser.add_argument('--gpu', default='1') parser.add_argument('--annotation_ratio', default=0.01, type=float) args = parser.parse_args() print(args) num_epochs = args.nepoch need_log = args.log BATCH_SIZE = args.batch num_workers = args.nworker os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu datatype = args.datatype annotation_ratio = args.annotation_ratio def main(): start_epoch = 1 # Whether to log the training information if need_log: logger_root = args.logpath if args.logpath != '' else 'logs' time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") if args.resume == '': model_save_path = check_folder(logger_root) model_save_path = check_folder(os.path.join(model_save_path, 'Stage2')) model_save_path = check_folder(os.path.join(model_save_path, time_stamp)) log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "w") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[0:]))) saver.write(args.__repr__() + "\n\n") saver.flush() else: model_save_path = args.resume log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "a") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment 
saver.write("command line: {}\n".format(" ".join(sys.argv[1:]))) saver.write(args.__repr__() + "\n\n") saver.flush() # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) tmp = args.motiondata trainset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': trainset = DatasetSingleSeq_Stage2(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': trainset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=num_workers) print("Training dataset size:", len(trainset)) tmp = args.evaldata evalset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': evalset = DatasetSingleSeq_Stage2(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': evalset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) evalloader = torch.utils.data.DataLoader(evalset, batch_size=1, shuffle=False, num_workers=num_workers) print("Training dataset size:", len(trainset)) model = WeakMotionNet(out_seq_len=out_seq_len, FGBG_category_num=2, height_feat_size=height_feat_size) model = nn.DataParallel(model) model = model.to(device) optimizer = optim.Adam(model.parameters(), lr=0.0005) scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20, 30, 40], gamma=0.5) if args.resume != '': checkpoint = torch.load(args.resume) start_epoch = checkpoint['epoch'] + 1 model.load_state_dict(checkpoint['model_state_dict']) print("Load model from {}, at epoch {}".format(args.resume, start_epoch - 1)) for epoch in range(start_epoch, num_epochs + 1): lr = optimizer.param_groups[0]['lr'] print("Epoch {}, learning rate {}".format(epoch, lr)) if need_log: saver.write("epoch: {}, lr: {}\t".format(epoch, lr)) saver.flush() model.train() loss_FGBG_seg, loss_disp = train(model, trainloader, optimizer, device, epoch, voxel_size, area_extents) model.eval() me_static, me_slow, me_fast, acc_bg, acc_fg = eval(model, evalloader, device) scheduler.step() if need_log: saver.write("loss_FGBG_seg:{}\t loss_disp:{}\n".format(loss_FGBG_seg, loss_disp)) saver.write("me_static:{}\t me_slow:{}\t me_fast:{}\n".format(me_static, me_slow, me_fast)) saver.write("acc_bg:{}\t acc_fg:{}\n".format(acc_bg, acc_fg)) saver.flush() # save model if need_log and (epoch >= 30): save_dict = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'loss_FGBG_seg': loss_FGBG_seg.avg, 'loss_disp': loss_disp.avg} torch.save(save_dict, os.path.join(model_save_path, 'epoch_' + str(epoch) + '_%.3f_%.3f_%.3f_%.3f_%.3f'%(me_static, me_slow, me_fast, acc_bg, acc_fg) + '.pth')) if need_log: saver.close() def train(model, trainloader, optimizer, device, epoch, voxel_size, 
area_extents): running_loss_FGBG_seg = AverageMeter('FGBG_Seg', ':.6f') # for cell FG/BG segmentation error running_loss_disp= AverageMeter('disp', ':.6f') # for cell motion prediction error # for i, data in enumerate(trainloader, 0): for i, data in tqdm(enumerate(trainloader, 0), total=len(trainloader), smoothing=0.9): padded_voxel_points, _, _, \ non_empty_map, _, _, \ pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \ FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2 = data optimizer.zero_grad() # Move to GPU/CPU padded_voxel_points = padded_voxel_points.to(device) non_empty_map = non_empty_map.to(device) # Make prediction disp_pred, FGBG_pred = model(padded_voxel_points) # Compute and back-propagate the losses loss_FGBG_seg = FGBG_seg_loss(FGBG_pred, point_FGBG_gt_mask_seg, pc_seg, curr_seg_num, voxel_size, area_extents)
loss_disp = CCD_loss(disp_pred, FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2,
4
2023-11-12 07:03:29+00:00
16k
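The CCD_loss snippet and the next_line field of the record above describe a consistency-aware Chamfer distance: the current-frame foreground points are warped backward and forward by the predicted per-point displacement and compared against the past and future foreground point sets, and the two terms are averaged. The sketch below shows only that bidirectional Chamfer core, without the confidence reweighting or the background term; the function names, shapes, and toy inputs are illustrative assumptions, not the dataset's code.

import torch

def chamfer(a, b):
    """Symmetric Chamfer distance between point sets a [N, 3] and b [M, 3]."""
    d = torch.cdist(a, b)  # [N, M] pairwise Euclidean distances
    return d.min(dim=1).values.mean() + d.min(dim=0).values.mean()

def bidirectional_chamfer_loss(pc_past, pc_curr, pc_future, disp_pred):
    """disp_pred: [N, 3] predicted 3D displacement for each current-frame foreground point."""
    backward = chamfer(pc_curr - disp_pred, pc_past)    # backward-warped current frame vs. past frame
    forward = chamfer(pc_curr + disp_pred, pc_future)   # forward-warped current frame vs. future frame
    return (backward + forward) / 2.0

# toy usage with random point clouds
pc0, pc1, pc2 = torch.randn(50, 3), torch.randn(60, 3), torch.randn(55, 3)
disp = torch.zeros(60, 3, requires_grad=True)
print(bidirectional_chamfer_loss(pc0, pc1, pc2, disp))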
c3exchange/c3-smartcontracts-v1
contracts_unified/core/main.py
[ { "identifier": "update", "path": "contracts_unified/core/bare_calls/update.py", "snippet": "@Subroutine(TealType.none)\ndef update() -> Expr:\n \"\"\"Implements the contract method called on update\"\"\"\n\n return sender_is_creator()" }, { "identifier": "delete", "path": "contracts_unified/core/bare_calls/update.py", "snippet": "@Subroutine(TealType.none)\ndef delete() -> Expr:\n \"\"\"Implements the contract method called on delete\"\"\"\n\n return sender_is_creator()" }, { "identifier": "account_move", "path": "contracts_unified/core/methods/account_move.py", "snippet": "@ABIReturnSubroutine\ndef account_move(\n source_account: AccountAddress,\n user_op: OperationMetaData,\n delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Moves funds between two accounts\n\n Arguments:\n\n source_account (AccountAddress): Source account address.\n user_op (OperationMetaData): Operation metadata containing destination account, cash and pool.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n _server_data (abi.DynamicBytes): Server data. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n # Constants\n abi_false = abi.Bool()\n\n # Extracted operation data\n data = AccountMoveData()\n cash = abi.make(SignedInstrumentBasket)\n pool = abi.make(SignedInstrumentBasket)\n\n # Sender and receiver accounts\n destination_account = AccountAddress()\n\n # Health check\n health = ExcessMargin()\n\n i = abi.Uint64()\n length = abi.Uint64()\n abi_zero_int = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n abi_zero_int.set(Int(0)),\n\n # Validate sender\n cast(Expr, sender_is_sig_validator()),\n\n # No delegation is allowed for account move\n Assert(delegation_chain.length() == Int(0)),\n\n # Get the source and destination accounts\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n data.operation.use(lambda op: Assert(op.get() == OperationId.AccountMove)),\n data.destination_account.store_into(destination_account),\n data.cash.store_into(cash),\n data.pool.store_into(pool),\n )\n ),\n\n # Validate the source account is not the destination account\n Assert(source_account.get() != destination_account.get()),\n\n # Check the closer-to-zero condition for the pool basket\n cast(Expr, closer_to_zero(source_account, pool)),\n\n # Update both users to the current index\n length.set(pool.length()),\n For(i.set(Int(0)), i.get() < length.get(), i.set(i.get() + Int(1))).Do(\n pool[i.get()].use(lambda instrument_amount:\n instrument_amount.instrument.use(lambda instrument:\n Seq(\n cast(Expr, perform_pool_move(source_account, instrument, abi_zero_int)),\n cast(Expr, perform_pool_move(destination_account, instrument, abi_zero_int))\n )\n )\n )\n ),\n\n # Perform update\n cast(Expr, signed_account_move_baskets(source_account, destination_account, cash, pool, abi_false, abi_false)),\n\n # Check health\n # NOTE: No need to check old vs new because all account moves make health worse\n health.set(health_check(source_account, abi_false)),\n Assert(Not(signed_ltz(health.get()))),\n )" }, { "identifier": "clean_orders", "path": "contracts_unified/core/methods/clean_orders.py", "snippet": "@ABIReturnSubroutine\ndef clean_orders(\n orders: abi.DynamicArray[OrderData],\n) -> Expr:\n \"\"\"\n Clean any expired orders from the order book\n\n Arguments:\n\n orders: The orders to analyze.\n \"\"\"\n\n i = abi.Uint64()\n length 
= abi.Uint64()\n order_data = OrderData()\n order_id = abi.make(OrderId)\n\n return Seq(\n # Loop through all orders\n length.set(orders.length()),\n For(i.set(Int(0)), i.get() < length.get(), i.set(i.get() + Int(1))).Do(\n # Check if order is expired\n order_data.set(orders[i.get()]),\n order_data.expiration_time.use(lambda expires:\n If(Global.latest_timestamp() > expires.get())\n .Then(\n # Delete order\n order_id.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_id(order_data))),\n cast(Expr, OrderStateHandler.delete_order_onchain(order_id)),\n )\n ),\n ),\n )" }, { "identifier": "create", "path": "contracts_unified/core/methods/create.py", "snippet": "@ABIReturnSubroutine\ndef create(\n pricecaster_id: EncodedAppId,\n wormhole_token_bridge_id: EncodedAppId,\n liquidation_factors: EncodedLiquidationFactors,\n withdraw_buffer_address: abi.Address,\n signature_validator_address: abi.Address,\n operator_address: abi.Address,\n quant_address: abi.Address,\n fee_target_address: abi.Address,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the contract method called at creation time\"\"\"\n\n return Seq(\n # Generate budget for the call\n setup(opup_budget.get()),\n\n # Initialize global state\n GlobalStateHandler.set_init_timestamp(),\n GlobalStateHandler.set_instrument_count(Int(0)),\n GlobalStateHandler.set_pricecaster_id(pricecaster_id.get()),\n GlobalStateHandler.set_wormhole_bridge_id(wormhole_token_bridge_id.get()),\n GlobalStateHandler.set_liquidation_factors(liquidation_factors.get()),\n GlobalStateHandler.set_withdraw_buffer(withdraw_buffer_address.get()),\n GlobalStateHandler.set_signature_validator(signature_validator_address.get()),\n GlobalStateHandler.set_operator_address(operator_address.get()),\n GlobalStateHandler.set_quant_address(quant_address.get()),\n GlobalStateHandler.set_fee_target(fee_target_address.get()),\n )" }, { "identifier": "deposit", "path": "contracts_unified/core/methods/deposit.py", "snippet": "@ABIReturnSubroutine\ndef deposit(\n account: AccountAddress,\n deposit_txn: abi.Transaction,\n payload: DepositWord,\n instrument_id: InstrumentId,\n instant_pool_move: Amount,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the standard Deposit contract method.\n\n Arguments:\n\n account (AccountAddress): Target account address to deposit to.\n deposit_txn (Transaction): The ABI \"Transaction-Type\" argument referencing the previous transaction to this call in the \"Standard Deposit\" group. 
Must be of type \"payment\" of \"asset transfer\".\n payload (DepositWord): Payload, must equal to \"Deposit\" string-literal.\n instrument_id (InstrumentId): Instrument to transfer.\n instant_pool_move (Amount): Optional amount to move to instant pool.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n\"\"\"\n\n deposit_asset_id = abi.Uint64()\n deposit_amount = abi.Uint64()\n element = InstrumentListElement()\n\n return Seq(\n # Generate budget for deposit\n setup(opup_budget.get()),\n\n # Validate deposit transaction\n Assert(\n And(\n # We don't really need to check rekey_to field,\n # but it's still good for us if we don't have to support unintended use cases.\n deposit_txn.get().rekey_to() == Global.zero_address(),\n deposit_txn.get().asset_close_to() == Global.zero_address(),\n )\n ),\n\n # Get deposit info from transaction\n Cond(\n [deposit_txn.get().type_enum() == TxnType.AssetTransfer, Seq(\n Assert(deposit_txn.get().asset_receiver() == Global.current_application_address()),\n deposit_asset_id.set(deposit_txn.get().xfer_asset()),\n deposit_amount.set(deposit_txn.get().asset_amount()),\n )],\n [deposit_txn.get().type_enum() == TxnType.Payment, Seq(\n Assert(deposit_txn.get().receiver() == Global.current_application_address()),\n deposit_asset_id.set(Int(0)),\n deposit_amount.set(deposit_txn.get().amount()),\n )],\n ),\n\n # Validate deposit asset is given instrument ID\n element.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n element.asset_id.use(lambda asset_id: Assert(deposit_asset_id.get() == asset_id.get())),\n\n # Perform deposit\n cast(Expr, inner_deposit_asset(account, payload, instrument_id, deposit_amount, instant_pool_move)),\n )" }, { "identifier": "fund_mbr", "path": "contracts_unified/core/methods/fund_mbr.py", "snippet": "@ABIReturnSubroutine\ndef fund_mbr(\n payment_txn: abi.PaymentTransaction,\n) -> Expr:\n \"\"\"Register payment in algos for the MBR fund of the contract\n\n Arguments:\n\n payment_txn: The payment transaction that will fund this contract\"\"\"\n\n return Seq(\n Assert(payment_txn.get().receiver() == Global.current_application_address()),\n GlobalStateHandler.add_mbr_fund(payment_txn.get().amount())\n )" }, { "identifier": "liquidate", "path": "contracts_unified/core/methods/liquidate.py", "snippet": "@ABIReturnSubroutine\ndef liquidate(\n liquidator_account: AccountAddress,\n user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Performs liquidation of a user's position\"\"\"\n\n # Constants\n abi_false = abi.Bool()\n abi_true = abi.Bool()\n abi_zero = Ratio()\n\n # Liquidation data\n data = LiquidationData()\n\n liquidatee_account = AccountAddress()\n liquidatee_maint_health = ExcessMargin()\n\n cash = abi.make(SignedInstrumentBasket)\n pool = abi.make(SignedInstrumentBasket)\n\n liquidator_health = ExcessMargin()\n\n factors = LiquidationFactors()\n cash_factor = Ratio()\n pool_factor = Ratio()\n\n cash_take_value = Price()\n pool_take_value = Price()\n pool_give_value = Price()\n\n alpha_numerator = ExcessMargin()\n alpha_denominator = ExcessMargin()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n abi_true.set(Int(1)),\n abi_zero.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Extract liquidation data\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n 
data.operation.use(lambda op: Assert(op.get() == OperationId.Liquidate)),\n data.liquidatee.store_into(liquidatee_account),\n data.cash.store_into(cash),\n data.pool.store_into(pool),\n )\n ),\n\n # Validate liquidatee is not liquidator\n Assert(liquidatee_account.get() != liquidator_account.get()),\n\n # Validate liquidatee is liquidatable\n liquidatee_maint_health.set(health_check(liquidatee_account, abi_true)),\n Assert(signed_ltz(liquidatee_maint_health.get())),\n\n # Perform netting on liquidatee account\n cast(Expr, perform_netting(liquidatee_account, liquidator_account)),\n\n # Get global constants\n factors.decode(GlobalStateHandler.get_liquidation_factors()),\n cash_factor.set(factors.cash_liquidation_factor),\n pool_factor.set(factors.pool_liquidation_factor),\n\n # Check the closer-to-zero condition for the pool basket\n cast(Expr, closer_to_zero(liquidatee_account, pool)),\n\n # Calculate basket values\n # NOTE: The cash_take_value and pool_give_value use the cash_factor, where as the pool_take_value uses the pool_factor\n # See the formulas from the design doc for more info.\n cash_take_value.set(calculate_basket_value(cash, abi_false, cash_factor, abi_true, abi_true, abi_false)),\n pool_take_value.set(calculate_basket_value(pool, abi_false, pool_factor, abi_true, abi_true, abi_false)),\n pool_give_value.set(calculate_basket_value(pool, abi_true, cash_factor, abi_true, abi_false, abi_false)),\n\n # Check inequality is satisfied\n Assert(cash_take_value.get() + pool_take_value.get() <= pool_give_value.get()),\n\n # Ensure fairness by calculating alpha and scaling the baskets\n # alpha = health(initial) / (initial_haircut * take_assets * price + initial_haircut * (1 - opt_util) * take_liabilities * price - (1 + initial_margin) * give_liabilities * price)\n # NOTE: health_check sets up the local state handler for itself, so we don't need to\n # NOTE: Reusing the above variables for the values used when calculating the denominator\n alpha_numerator.set(health_check(liquidatee_account, abi_false)),\n cash_take_value.set(calculate_basket_value(cash, abi_false, abi_zero, abi_false, abi_false, abi_false)),\n pool_take_value.set(calculate_basket_value(pool, abi_false, abi_zero, abi_false, abi_false, abi_true)),\n pool_give_value.set(calculate_basket_value(pool, abi_true, abi_zero, abi_false, abi_true, abi_false)),\n alpha_denominator.set(pool_give_value.get() - (cash_take_value.get() + pool_take_value.get())),\n\n # Clamp alpha to be between 0 and 1\n alpha_numerator.set(signed_abs(alpha_numerator.get())),\n\n If(alpha_numerator.get() > alpha_denominator.get())\n .Then(alpha_numerator.set(alpha_denominator.get())),\n\n # Scale the basket values to be fair\n cash.set(cast(abi.ReturnedValue, scale_basket(cash, alpha_numerator, alpha_denominator))),\n pool.set(cast(abi.ReturnedValue, scale_basket(pool, alpha_numerator, alpha_denominator))),\n\n # Perform liquidation swaps, all relevant glboal indexes are updated after netting\n cast(Expr, signed_account_move_baskets(liquidatee_account, liquidator_account, cash, pool, abi_false, abi_true)),\n\n # Verify liquidator is still healthy\n # NOTE: Liquidator must always be in the green after liquidation\n # NOTE: Liquidatee will always be healthier by design\n liquidator_health.set(health_check(liquidator_account, abi_false)),\n Assert(Not(signed_ltz(liquidator_health.get()))),\n )" }, { "identifier": "pool_move", "path": "contracts_unified/core/methods/pool_move.py", "snippet": "@ABIReturnSubroutine\ndef pool_move(\n account: AccountAddress,\n 
user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Transfers instruments from user's address to the pool\n\n Arguments:\n\n account (AccountAddress): User's account address.\n user_op (OperationMetaData): Operation metadata containing a basket of instruments.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n _server_data (abi.DynamicBytes): Server data. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_false = abi.Bool()\n\n user_old_health = ExcessMargin()\n user_health = ExcessMargin()\n\n data = PoolMoveData()\n instrument = InstrumentId()\n amount = SignedAmount()\n\n user_data = UserInstrumentData()\n price = Price()\n cash = Amount()\n neg_cash = SignedAmount()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Load constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Get basket from user_op.data\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n data.operation.use(lambda op: Assert(op.get() == OperationId.PoolMove)),\n instrument.set(data.instrument),\n amount.set(data.amount),\n )\n ),\n\n # Get old health\n user_old_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n\n # Move funds\n cast(Expr, perform_pool_move(account, instrument, amount)),\n\n # When there is a negative movement, we need to check that the user can support itself without netting\n If(signed_ltz(amount.get())).Then(\n # Get instrument price\n price.set(cast(abi.ReturnedValue, get_normalized_price(instrument))),\n # Extract user cash\n user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument))),\n cash.set(user_data.cash),\n neg_cash.set(signed_neg(cash.get())),\n # Remove all user cash temporarily\n cast(Expr, signed_add_to_cash(account, instrument, neg_cash)),\n # Recalculate health without netting the borrowed asset, ensure it is positive\n user_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n user_health.set(signed_add(user_health.get(), WideRatio([price.get(), cash.get()], [Int(PRICECASTER_RESCALE_FACTOR)]))),\n Assert(Not(signed_ltz(user_health.get()))),\n # Add all the cash back\n cast(Expr, signed_add_to_cash(account, instrument, cash)),\n ),\n\n # Validate user is still healthy\n user_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n Assert(Or(Not(signed_ltz(user_health.get())), signed_gte(user_health.get(), user_old_health.get()))),\n )" }, { "identifier": "portal_transfer", "path": "contracts_unified/core/methods/portal_transfer.py", "snippet": "@ABIReturnSubroutine\ndef portal_transfer(vaa: abi.DynamicBytes, *, output: abi.DynamicBytes) -> Expr:\n \"\"\"\n\n Called at the end of a transfer from the portal to C3 and\n use as a \"marker\" and VAA source for the deposit operation.\n\n Decoding and validation of the VAA, along with sender check is performed\n in the deposit operation, where this txn is referenced.\n\n \"\"\"\n\n return Seq(\n Assert(Len(vaa.get()) != Int(0), comment=\"Empty VAA\"),\n # Anything works here, since wormhole requires some value\n output.set(Bytes(\"base16\", \"0x00\")),\n )" }, { "identifier": "settle", "path": "contracts_unified/core/methods/settle.py", "snippet": "@ABIReturnSubroutine\ndef settle(\n add_order_txn: abi.ApplicationCallTransaction,\n buy_account: AccountAddress,\n user_op: OperationMetaData,\n 
_delegation_chain: DelegationChain,\n server_args: SettleExtraData,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Settles two orders\n\n Arguments:\n\n add_order_txn (ApplicationCallTransaction): The previous add_order transaction in this group that added the sell order to the order book.\n buy_account (AccountAddress): The buyer user's account address.\n user_op (OperationMetaData): Operation metadata containing buyer order data.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n server_args (SettleExtraData): Extra data for the settle operation.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_false = abi.Bool()\n add_order_op = OperationMetaData()\n add_order_data = abi.make(abi.DynamicBytes)\n\n buy_order = OrderData()\n sell_order = OrderData()\n\n sell_account = AccountAddress()\n\n buy_order_id = abi.make(OrderId)\n sell_order_id = abi.make(OrderId)\n\n buy_order_onchain = OnChainOrderData()\n sell_order_onchain = OnChainOrderData()\n\n # Amounts for each order's buy/sell side\n buyer_sell_amount = Amount()\n buyer_buy_amount = Amount()\n seller_sell_amount = Amount()\n seller_buy_amount = Amount()\n\n # Remaining amounts for each order's buy/sell side\n buyer_sell_remaining = Amount()\n buyer_borrow_remaining = Amount()\n buyer_repay_remaining = Amount()\n\n seller_sell_remaining = Amount()\n seller_borrow_remaining = Amount()\n seller_repay_remaining = Amount()\n\n # New remaining amounts for each order's buy/sell side\n buyer_new_sell_remaining = Amount()\n buyer_new_borrow_remaining = Amount()\n buyer_new_repay_remaining = Amount()\n\n seller_new_sell_remaining = Amount()\n seller_new_borrow_remaining = Amount()\n seller_new_repay_remaining = Amount()\n\n buyer_new_order_onchain = OnChainOrderData()\n seller_new_order_onchain = OnChainOrderData()\n\n buyer_buy_instrument = InstrumentId()\n buyer_sell_instrument = InstrumentId()\n seller_buy_instrument = InstrumentId()\n seller_sell_instrument = InstrumentId()\n\n buyer_to_send = Amount()\n seller_to_send = Amount()\n\n buyer_to_borrow = Amount()\n seller_to_borrow = Amount()\n buyer_to_repay = Amount()\n seller_to_repay = Amount()\n\n buyer_buy_delta = Amount()\n seller_buy_delta = Amount()\n buyer_sell_delta = Amount()\n seller_sell_delta = Amount()\n\n neg_borrow = SignedAmount()\n\n buyer_fees = Amount()\n seller_fees = Amount()\n\n buyer_old_health = ExcessMargin()\n buyer_health = ExcessMargin()\n seller_old_health = ExcessMargin()\n seller_health = ExcessMargin()\n\n buyer_negative_margin = Boolean()\n seller_negative_margin = Boolean()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Extract the buy order\n user_op.operation.use(lambda op_data:\n Seq(\n buy_order.decode(op_data.get()),\n buy_order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),\n buy_order.account.use(lambda acc: Assert(acc.get() == buy_account.get())),\n )\n ),\n\n # Add the order to the order book\n cast(Expr, OrderStateHandler.add_order(buy_order)),\n\n # Validate the sell order\n Assert(add_order_txn.get().application_id() == Global.current_application_id()),\n Assert(add_order_txn.get().on_completion() == OnComplete.NoOp),\n Assert(add_order_txn.get().application_args.length() == ADD_ORDER_ARG_COUNT),\n Assert(add_order_txn.get().application_args[ARG_INDEX_SELECTOR] == ADD_ORDER_SIG),\n\n # Get the sell order\n 
sell_account.decode(add_order_txn.get().application_args[ARG_INDEX_ACCOUNT]),\n add_order_op.decode(add_order_txn.get().application_args[ARG_INDEX_OP]),\n add_order_op.operation.store_into(add_order_data),\n sell_order.decode(add_order_data.get()),\n\n # Get order IDs\n buy_order_id.set(OrderStateHandler.get_order_id(buy_order)),\n sell_order_id.set(OrderStateHandler.get_order_id(sell_order)),\n\n # Get on chain order data\n buy_order_onchain.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_onchain(buy_order_id))),\n sell_order_onchain.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_onchain(sell_order_id))),\n\n # Validate the asset pair matches\n buy_order.sell_instrument.store_into(buyer_sell_instrument),\n buy_order.buy_instrument.store_into(buyer_buy_instrument),\n sell_order.sell_instrument.store_into(seller_sell_instrument),\n sell_order.buy_instrument.store_into(seller_buy_instrument),\n\n Assert(buyer_sell_instrument.get() == seller_buy_instrument.get()),\n Assert(buyer_buy_instrument.get() == seller_sell_instrument.get()),\n\n # Validate the orders are not expired\n buy_order.expiration_time.use(lambda expiration_time:\n Assert(expiration_time.get() > Global.latest_timestamp())\n ),\n sell_order.expiration_time.use(lambda expiration_time:\n Assert(expiration_time.get() > Global.latest_timestamp())\n ),\n\n # Validate the orders match\n buyer_sell_amount.set(buy_order.sell_amount),\n buyer_buy_amount.set(buy_order.buy_amount),\n seller_sell_amount.set(sell_order.sell_amount),\n seller_buy_amount.set(sell_order.buy_amount),\n\n Assert(\n BytesGe(\n BytesMul(Itob(buyer_sell_amount.get()), Itob(seller_sell_amount.get())),\n BytesMul(Itob(buyer_buy_amount.get()), Itob(seller_buy_amount.get()))\n )\n ),\n\n # Validate that the swap is fair for both the seller and the buyer\n buyer_to_send.set(server_args.buyer_to_send),\n seller_to_send.set(server_args.seller_to_send),\n\n Assert(\n BytesGe(\n BytesMul(Itob(buyer_to_send.get()), Itob(seller_sell_amount.get())),\n BytesMul(Itob(seller_to_send.get()), Itob(seller_buy_amount.get()))\n )\n ),\n\n Assert(\n BytesGe(\n BytesMul(Itob(seller_to_send.get()), Itob(buyer_sell_amount.get())),\n BytesMul(Itob(buyer_to_send.get()), Itob(buyer_buy_amount.get()))\n )\n ),\n\n # Validate that we are not sending more than allowed\n buyer_sell_remaining.set(buy_order_onchain.sell_remaining),\n Assert(buyer_sell_remaining.get() >= buyer_to_send.get()),\n seller_sell_remaining.set(sell_order_onchain.sell_remaining),\n Assert(seller_sell_remaining.get() >= seller_to_send.get()),\n\n # Validate that we are not borrowing more thn allowed\n buyer_borrow_remaining.set(buy_order_onchain.borrow_remaining),\n buyer_to_borrow.set(server_args.buyer_to_borrow),\n Assert(buyer_borrow_remaining.get() >= buyer_to_borrow.get()),\n\n seller_borrow_remaining.set(sell_order_onchain.borrow_remaining),\n seller_to_borrow.set(server_args.seller_to_borrow),\n Assert(seller_borrow_remaining.get() >= seller_to_borrow.get()),\n\n # Validate that we are not repaying more than allowed\n buyer_repay_remaining.set(buy_order_onchain.repay_remaining),\n buyer_to_repay.set(server_args.buyer_to_repay),\n Assert(buyer_repay_remaining.get() >= buyer_to_repay.get()),\n\n seller_repay_remaining.set(sell_order_onchain.repay_remaining),\n seller_to_repay.set(server_args.seller_to_repay),\n Assert(seller_repay_remaining.get() >= seller_to_repay.get()),\n\n # Validate that the fees are lower than the maximum possible\n buyer_fees.set(server_args.buyer_fees),\n 
seller_fees.set(server_args.seller_fees),\n Assert(buyer_fees.get() <= (buyer_to_send.get() / MAX_FEES_DIVISOR)),\n Assert(seller_fees.get() <= (buyer_to_send.get() / MAX_FEES_DIVISOR)),\n\n # We shouldn't borrow / repay more than the assets traded, including fees.\n Assert(buyer_to_borrow.get() <= buyer_to_send.get() + buyer_fees.get()),\n Assert(buyer_to_repay.get() <= seller_to_send.get()),\n Assert(seller_to_borrow.get() <= seller_to_send.get()),\n Assert(seller_to_repay.get() <= buyer_to_send.get() - seller_fees.get()),\n\n # Generate the updated order book for the buy order\n buyer_new_sell_remaining.set(buyer_sell_remaining.get() - buyer_to_send.get()),\n buyer_new_borrow_remaining.set(buyer_borrow_remaining.get() - buyer_to_borrow.get()),\n buyer_new_repay_remaining.set(buyer_repay_remaining.get() - buyer_to_repay.get()),\n buyer_new_order_onchain.set(buyer_new_sell_remaining, buyer_new_borrow_remaining, buyer_new_repay_remaining),\n\n # Generate the updated order book for the sell order\n seller_new_sell_remaining.set(seller_sell_remaining.get() - seller_to_send.get()),\n seller_new_borrow_remaining.set(seller_borrow_remaining.get() - seller_to_borrow.get()),\n seller_new_repay_remaining.set(seller_repay_remaining.get() - seller_to_repay.get()),\n seller_new_order_onchain.set(seller_new_sell_remaining, seller_new_borrow_remaining, seller_new_repay_remaining),\n\n # Calculate the swap amounts\n buyer_buy_delta.set(seller_to_send.get()),\n seller_buy_delta.set(buyer_to_send.get() - seller_fees.get()),\n buyer_sell_delta.set(signed_neg(buyer_to_send.get() + buyer_fees.get())),\n seller_sell_delta.set(signed_neg(seller_to_send.get())),\n\n # Update the on chain order data\n OrderStateHandler.set_order_onchain(buy_order_id, buyer_new_order_onchain),\n OrderStateHandler.set_order_onchain(sell_order_id, seller_new_order_onchain),\n\n # Get old health for both users if needed\n buyer_negative_margin.set(server_args.buyer_negative_margin),\n seller_negative_margin.set(server_args.seller_negative_margin),\n\n If(buyer_negative_margin.get()).Then(\n buyer_old_health.set(cast(abi.ReturnedValue, health_check(buy_account, abi_false))),\n ),\n\n If(seller_negative_margin.get()).Then(\n seller_old_health.set(cast(abi.ReturnedValue, health_check(sell_account, abi_false))),\n ),\n\n # Handle borrow updates\n If(buyer_to_borrow.get() > Int(0)).Then(\n neg_borrow.set(signed_neg(buyer_to_borrow.get())),\n cast(Expr, perform_pool_move(buy_account, buyer_sell_instrument, neg_borrow)),\n ),\n If(seller_to_borrow.get() > Int(0)).Then(\n neg_borrow.set(signed_neg(seller_to_borrow.get())),\n cast(Expr, perform_pool_move(sell_account, seller_sell_instrument, neg_borrow)),\n ),\n\n # Perform swap updates\n cast(Expr, signed_add_to_cash(buy_account, buyer_buy_instrument, buyer_buy_delta)),\n cast(Expr, signed_add_to_cash(sell_account, seller_buy_instrument, seller_buy_delta)),\n cast(Expr, signed_add_to_cash(buy_account, buyer_sell_instrument, buyer_sell_delta)),\n cast(Expr, signed_add_to_cash(sell_account, seller_sell_instrument, seller_sell_delta)),\n\n # Collect the fees\n cast(Expr, collect_fees(buyer_sell_instrument, buyer_fees)),\n cast(Expr, collect_fees(seller_buy_instrument, seller_fees)),\n\n # Handle repay updates\n If(buyer_to_repay.get() > Int(0)).Then(\n cast(Expr, perform_pool_move(buy_account, buyer_buy_instrument, buyer_to_repay)),\n ),\n If(seller_to_repay.get() > Int(0)).Then(\n cast(Expr, perform_pool_move(sell_account, seller_buy_instrument, seller_to_repay)),\n ),\n\n # Validate the 
users are still healthy\n buyer_health.set(cast(abi.ReturnedValue, health_check(buy_account, abi_false))),\n Assert(Or(Not(signed_ltz(buyer_health.get())), And(buyer_negative_margin.get(), signed_gte(buyer_health.get(), buyer_old_health.get())))),\n seller_health.set(cast(abi.ReturnedValue, health_check(sell_account, abi_false))),\n Assert(Or(Not(signed_ltz(seller_health.get())), And(seller_negative_margin.get(), signed_gte(seller_health.get(), seller_old_health.get())))),\n )" }, { "identifier": "add_order", "path": "contracts_unified/core/methods/settle.py", "snippet": "@ABIReturnSubroutine\ndef add_order(\n # NOTE: Any update on this function must update ADD_ORDER_SIG and ADD_ORDER_ARG_COUNT above\n account: AccountAddress,\n user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n opup_budget: Amount,\n) -> Expr:\n\n \"\"\"Adds an order to the order book\n\n Arguments:\n\n account (AccountAddress): User's account address.\n user_op (OperationMetaData): Operation metadata containing order data.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n order = OrderData()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Validate signature validator' call\n cast(Expr, sender_is_sig_validator()),\n\n # Get order from user_op.data\n user_op.operation.use(lambda op_data:\n Seq(\n order.decode(op_data.get()),\n order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),\n order.account.use(lambda acc: Assert(acc.get() == account.get()))\n )\n ),\n\n # Add order to the order book\n cast(Expr, OrderStateHandler.add_order(order))\n )" }, { "identifier": "update_instrument", "path": "contracts_unified/core/methods/update_instrument.py", "snippet": "@ABIReturnSubroutine\ndef update_instrument(\n info: UpdateInstrumentInfo,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the method that adds an instrument to the Core contract storage box.\n\n Arguments:\n\n info (UpdateInstrumentInfo): Instrument information to add or update.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_zero = abi.Uint64()\n abi_rate_one = abi.Uint64()\n abi_zero_address = abi.Address()\n\n timestamp = RelativeTimestamp()\n\n asset_id = AssetId()\n initial_haircut = Ratio()\n initial_margin = Ratio()\n maintenance_haircut = Ratio()\n maintenance_margin = Ratio()\n optimal_utilization = Ratio()\n min_rate = InterestRate()\n opt_rate = InterestRate()\n max_rate = InterestRate()\n borrow_index = abi.Uint64()\n lend_index = abi.Uint64()\n borrowed = Amount()\n liquidity = Amount()\n entry = InstrumentListElement()\n\n instrument_id = InstrumentId()\n instrument_count = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Validate sender\n Assert(Txn.sender() == GlobalStateHandler.get_quant_address()),\n\n # Initialize the instrument box first if it doesn't exist\n cast(Expr, GlobalStateHandler.initialize()),\n\n # Get init time\n timestamp.set(GlobalStateHandler.get_relative_timestamp()),\n\n # Create the instrument list element\n abi_zero.set(Int(0)),\n abi_rate_one.set(RATE_ONE),\n abi_zero_address.set(Global.zero_address()),\n\n # Extract fields from info\n asset_id.set(info.asset_id),\n initial_haircut.set(info.initial_haircut),\n initial_margin.set(info.initial_margin),\n maintenance_haircut.set(info.maintenance_haircut),\n maintenance_margin.set(info.maintenance_margin),\n 
optimal_utilization.set(info.optimal_utilization),\n min_rate.set(info.min_rate),\n opt_rate.set(info.opt_rate),\n max_rate.set(info.max_rate),\n\n # Load the current instrument count and validate it\n instrument_id.set(info.instrument_id),\n instrument_count.set(GlobalStateHandler.get_instrument_count()),\n Assert(instrument_id.get() <= instrument_count.get()),\n\n # Validate instrument zero is always algo\n If(instrument_id.get() == Int(0))\n .Then(Assert(asset_id.get() == Int(0))),\n\n # Check for new entry vs old entry\n If(instrument_id.get() == instrument_count.get())\n .Then(\n # Perform optin to asset if needed\n If(asset_id.get() != Int(0), cast(Expr, inner_asset_opt_in(asset_id))),\n\n # Create the new entry\n borrow_index.set(abi_rate_one),\n lend_index.set(abi_rate_one),\n borrowed.set(abi_zero),\n liquidity.set(abi_zero),\n\n # Increase the instrument count\n GlobalStateHandler.set_instrument_count(instrument_count.get() + Int(1)),\n )\n .Else(\n # Not a new instrument, we need to accrue the interest\n cast(Expr, perform_pool_move(abi_zero_address, instrument_id, abi_zero)),\n # Retain the accrued interest values for the new entry\n entry.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n # NOTE: The timestamp should be the same as the one for a new instrument\n entry.borrow_index.store_into(borrow_index),\n entry.lend_index.store_into(lend_index),\n entry.borrowed.store_into(borrowed),\n entry.liquidity.store_into(liquidity),\n ),\n\n # Create the new entry\n entry.set(\n asset_id,\n initial_haircut,\n initial_margin,\n maintenance_haircut,\n maintenance_margin,\n timestamp,\n borrow_index,\n lend_index,\n optimal_utilization,\n min_rate,\n opt_rate,\n max_rate,\n borrowed,\n liquidity,\n ),\n\n # Perform update/insert for entry\n GlobalStateHandler.set_instrument(instrument_id, entry),\n\n # Ensure we have enough funds for mbr\n cast(Expr, GlobalStateHandler.ensure_mbr_fund()),\n )" }, { "identifier": "update_parameter", "path": "contracts_unified/core/methods/update_parameter.py", "snippet": "@ABIReturnSubroutine\ndef update_parameter(\n key_to_update: abi.DynamicBytes,\n updated_value: abi.DynamicBytes,\n) -> Expr:\n \"\"\"Implements the method that changes a global parameter of the contract.\n\n Arguments:\n\n key_to_update (abi.DynamicBytes): Key of the parameter to update\n updated_value (abi.DynamicBytes): New value of the parameter\n\n \"\"\"\n\n key = ScratchVar(TealType.bytes)\n value = ScratchVar(TealType.bytes)\n\n return Seq(\n key.store(key_to_update.get()),\n value.store(updated_value.get()),\n If(key.load() == KEY_LIQUIDATION_FACTORS).Then(\n Assert(GlobalStateHandler.get_quant_address() == Txn.sender()),\n GlobalStateHandler.set_liquidation_factors(value.load())\n ).Else(\n Assert(Global.creator_address() == Txn.sender()),\n Cond(\n [key.load() == KEY_PRICECASTER_ID, GlobalStateHandler.set_pricecaster_id(value.load())],\n [key.load() == KEY_WORMHOLE_BRIDGE_ID, GlobalStateHandler.set_wormhole_bridge_id(value.load())],\n [key.load() == KEY_SIGNATURE_VALIDATOR, GlobalStateHandler.set_signature_validator(value.load())],\n [key.load() == KEY_QUANT_ADDRESS, GlobalStateHandler.set_quant_address(value.load())],\n [key.load() == KEY_FEE_TARGET, GlobalStateHandler.set_fee_target(value.load())],\n [key.load() == KEY_WITHDRAW_BUFFER, GlobalStateHandler.set_withdraw_buffer(value.load())],\n [key.load() == KEY_OPERATOR_ADDRESS, GlobalStateHandler.set_operator_address(value.load())],\n )\n )\n )" }, { "identifier": "withdraw", "path": 
"contracts_unified/core/methods/withdraw.py", "snippet": "@ABIReturnSubroutine\ndef withdraw(\n account: AccountAddress,\n user_op: OperationMetaData,\n delegation_chain: DelegationChain,\n server_params: WithdrawExtraData,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Withdraws funds from a user and sends them to a given Wormhole or Algorand address, depending on target chain\n\n Args:\n\n account (AccountAddress): The user account address.\n user_op (OperationMetaData): The user operation metadata. This contains signed withdraw data: instrument, amount, receiver, and maximum amount to borrow.\n delegation_chain (DelegationChain): The delegation chain. For withdraw operations this must be empty.\n server_params (abi.Uint64): The server parameters. For withdraw, this parameter just contains server' own balance.\n opup_budget (Amount): Additional computation budget for the operation.\n\n \"\"\"\n\n # Holds the withdraw buffer address\n wormhole_withdraw_buffer = abi.Address()\n\n # Constants\n abi_false = abi.Bool()\n\n # Holds extracted withdraw data from the user_op\n withdraw_data = WithdrawData()\n\n # Holds extracted withdraw data from the user_op\n instrument_id = InstrumentId()\n amount = Amount()\n receiver = WormholeAddress()\n max_borrow = Amount()\n amount_to_deduct = SignedAmount()\n amount_to_withdraw = SignedAmount()\n amount_to_borrow = SignedAmount()\n max_fees = Amount()\n\n # User balance, to calculate the cash/pool split of the withdrawal\n position = UserInstrumentData()\n balance = Amount()\n\n # Fees to be collected\n withdraw_fee = Amount()\n\n # Used to validate the user's health\n user_health = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Load constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # No delegation is allowed for withdraw\n Assert(delegation_chain.length() == Int(0)),\n\n # Decode and extract withdraw operation\n user_op.operation.use(lambda op_data:\n Seq(\n withdraw_data.decode(op_data.get()),\n withdraw_data.operation.use(lambda op: Assert(op.get() == OperationId.Withdraw)),\n withdraw_data.instrument.store_into(instrument_id),\n withdraw_data.amount.store_into(amount),\n withdraw_data.receiver.store_into(receiver),\n withdraw_data.max_borrow.store_into(max_borrow),\n withdraw_data.max_fees.store_into(max_fees),\n )\n ),\n\n # Calculate cash and pool withdrawal amounts\n position.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n balance.set(position.cash),\n server_params.locked_cash.use(lambda locked_cash:\n balance.set(balance.get() - locked_cash.get()),\n ),\n\n # Get the fees\n withdraw_fee.set(server_params.withdraw_fee),\n\n # Do not exceed maximum fee limit specified in request.\n Assert(withdraw_fee.get() <= max_fees.get()),\n\n # Validate the user is not borrowing more than they have allowed\n Assert(amount.get() <= max_borrow.get() + balance.get()),\n\n # Calculate withdrawal amounts\n If(amount.get() > balance.get())\n .Then(\n amount_to_borrow.set(signed_neg(amount.get() - balance.get())),\n )\n .Else(\n amount_to_borrow.set(Int(0)),\n ),\n # This is the delta value to apply to the user cash\n amount_to_deduct.set(signed_neg(amount.get())),\n # This is the amount the user will actually get, implicitly fails if fees are bigger than the amount\n amount_to_withdraw.set(amount.get() - withdraw_fee.get()),\n\n # Borrow if needed\n If(amount_to_borrow.get() != Int(0))\n .Then(cast(Expr, perform_pool_move(account, instrument_id, 
amount_to_borrow))),\n\n # Remove assets\n cast(Expr, signed_add_to_cash(account, instrument_id, amount_to_deduct)),\n\n # Pay fees\n cast(Expr, collect_fees(instrument_id, withdraw_fee)),\n\n # Validate user is still healthy\n # NOTE: Withdraw always makes the user less healthy, so we don't need to check\n # the user's health before the withdrawal\n user_health.set(health_check(account, abi_false)),\n Assert(Not(signed_ltz(user_health.get()))),\n\n # Now that assets/liabilities are up to date, send out payment transaction.\n # If we are withdrawing to offchain, we need to check wormhole transactions\n wormhole_withdraw_buffer.set(GlobalStateHandler.get_withdraw_buffer()),\n receiver.chain_id.use(lambda chain_id:\n receiver.address.use(lambda address:\n If(\n chain_id.get() == Int(ALGORAND_CHAIN_ID),\n cast(Expr, submit_withdraw_onchain(address, instrument_id, amount_to_withdraw)),\n cast(Expr, submit_withdraw_offchain(wormhole_withdraw_buffer, instrument_id, amount_to_withdraw)),\n )\n )\n ),\n )" }, { "identifier": "wormhole_deposit", "path": "contracts_unified/core/methods/wormhole_deposit.py", "snippet": "@ABIReturnSubroutine\ndef wormhole_deposit(\n portal_transfer_txn: abi.ApplicationCallTransaction,\n account: AccountAddress,\n payload: DepositWord,\n instrument_id: InstrumentId,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the contract method called during an ASA deposit via Wormhole.\n\n Arguments:\n\n portal_transfer_txn (ApplicationCallTransaction): The ABI \"ApplicationCallTransaction\" argument referencing the previous transaction to this call in the \"Wormhole Deposit\" group. Must be of type \"application call\".\n account (AccountAddress): Target account address to deposit to.\n payload (DepositWord): Payload, must equal to \"WormholeDeposit\" string-literal.\n instrument_id (InstrumentId): Instrument to transfer.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n ----------------------------------------------------------------------------------------------------------------------------------\n\n Security rationale: The completeTransfer method of the Wormhole Token Bridge guarantees that:\n\n - The VAA was processed by the vaaVerify method of the Wormhole Core.\n - The VAA matches the completeTransfer arg.\n - The portal_transfer method exists in the group and has the proper target appId matching the Vaa.\n - The portal_transfer method has the correct sender (the server in our case)\n\n If we can ensure that the completeTransfer method exists in the group and it's from\n the canonical Wormhole Token Bridge Appid, we can transitively check remaining properties\n for additional security.\n\n Additionally, the innertxn doing the transfer actually uses the VAA information which\n we ensure is correct for the three sources: this method, the completeTransfer method and the\n vaaVerify method in the Core.\n\n ref: https://github.com/wormhole-foundation/wormhole/blob/5255e933d68629f0643207b0f9d3fa797af5cbf7/algorand/token_bridge.py#L466\n\n \"\"\"\n\n vaa = portal_transfer_txn.get().application_args[1]\n complete_transfer_txn = Gtxn[portal_transfer_txn.get().group_index() - Int(1)]\n decoded_payload = DecodedWormholePayload()\n abi_vaa = abi.make(abi.DynamicBytes)\n abi_amount = abi.Uint64()\n abi_repay_amount = abi.Uint64()\n abi_receiver = abi.Address()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Ensure there are no rogue transactions past the box-budget setup\n Assert(Global.group_size() == Txn.group_index() + Int(2), 
comment=\"Unknown transactions ahead detected\"),\n\n # Ensure completeTransfer from canonical Wormhole Token Bridge exists.\n Assert(complete_transfer_txn.application_args[0] == Bytes(\"completeTransfer\"), comment=\"expected completeTransfer method call\"),\n Assert(complete_transfer_txn.application_id() == GlobalStateHandler.get_wormhole_bridge_id(), comment=\"completeTransfer call appId unknown\"),\n\n # In our current design, owner == creator, so this is valid. What we should check?\n Assert(complete_transfer_txn.sender() == GlobalStateHandler.get_operator_address(), comment=\"completeTransfer call sender unknown\"),\n\n # Ensure VAAs match\n abi_vaa.decode(vaa),\n\n # The completeTransfer code ensures his VAA equals portal_transfer VAA, we check here\n # if we match our VAA\n Assert(complete_transfer_txn.application_args[1] == abi_vaa.get(), comment=\"VAAs do not match\"),\n\n # Decode the VAA\n decoded_payload.set(cast(abi.ReturnedValue, decode_wormhole_payload(abi_vaa))),\n abi_amount.set(decoded_payload.amount),\n abi_repay_amount.set(decoded_payload.repay_amount),\n abi_receiver.set(decoded_payload.receiver),\n\n # Validate the VAA, do we need more checks?\n XAssert(\n abi_receiver.get() == account.get(),\n comment=\"Receiving user address mismatch\",\n ),\n\n # Perform deposit\n cast(Expr, inner_deposit_asset(account, payload, instrument_id, abi_amount, abi_repay_amount)),\n )" } ]
from pyteal import (
    BareCallActions,
    CallConfig,
    MethodConfig,
    OnCompleteAction,
    OptimizeOptions,
    Reject,
    Router,
)

from contracts_unified.core.bare_calls import delete, update
from contracts_unified.core.methods import (
    account_move,
    add_order,
    clean_orders,
    create,
    deposit,
    fund_mbr,
    liquidate,
    pool_move,
    portal_transfer,
    settle,
    update_instrument,
    update_parameter,
    withdraw,
    wormhole_deposit,
)
11,912
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()), delete_application=OnCompleteAction.always(delete()), ), clear_state=Reject(), ) CORE_ROUTER.add_method_handler( create, "create", MethodConfig(no_op=CallConfig.CREATE), "Create C3 Core contract", ) CORE_ROUTER.add_method_handler( update_instrument, "update_instrument", MethodConfig(no_op=CallConfig.CALL), "Add a new instrument (ASA) to the Core", ) CORE_ROUTER.add_method_handler( update_parameter, "update_parameter", MethodConfig(no_op=CallConfig.CALL), "Update a global parameter", ) CORE_ROUTER.add_method_handler( deposit, "deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account", ) CORE_ROUTER.add_method_handler( wormhole_deposit, "wormhole_deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account via Wormhole", ) CORE_ROUTER.add_method_handler( pool_move, "pool_move", MethodConfig(no_op=CallConfig.CALL), "Transfer instruments between user and pool", ) CORE_ROUTER.add_method_handler( add_order, "add_order", MethodConfig(no_op=CallConfig.CALL), "Add an order to the order book", ) CORE_ROUTER.add_method_handler( settle, "settle", MethodConfig(no_op=CallConfig.CALL), "Settle two orders" ) CORE_ROUTER.add_method_handler( withdraw, "withdraw", MethodConfig(no_op=CallConfig.CALL), "Withdraw funds from user account", ) CORE_ROUTER.add_method_handler(
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()), delete_application=OnCompleteAction.always(delete()), ), clear_state=Reject(), ) CORE_ROUTER.add_method_handler( create, "create", MethodConfig(no_op=CallConfig.CREATE), "Create C3 Core contract", ) CORE_ROUTER.add_method_handler( update_instrument, "update_instrument", MethodConfig(no_op=CallConfig.CALL), "Add a new instrument (ASA) to the Core", ) CORE_ROUTER.add_method_handler( update_parameter, "update_parameter", MethodConfig(no_op=CallConfig.CALL), "Update a global parameter", ) CORE_ROUTER.add_method_handler( deposit, "deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account", ) CORE_ROUTER.add_method_handler( wormhole_deposit, "wormhole_deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account via Wormhole", ) CORE_ROUTER.add_method_handler( pool_move, "pool_move", MethodConfig(no_op=CallConfig.CALL), "Transfer instruments between user and pool", ) CORE_ROUTER.add_method_handler( add_order, "add_order", MethodConfig(no_op=CallConfig.CALL), "Add an order to the order book", ) CORE_ROUTER.add_method_handler( settle, "settle", MethodConfig(no_op=CallConfig.CALL), "Settle two orders" ) CORE_ROUTER.add_method_handler( withdraw, "withdraw", MethodConfig(no_op=CallConfig.CALL), "Withdraw funds from user account", ) CORE_ROUTER.add_method_handler(
portal_transfer,
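The record's target completion, portal_transfer, is the first argument of the add_method_handler( call that cropped_code ends on. By analogy with the other registrations, the finished call presumably reads roughly as sketched below; the MethodConfig and the description string are guesses, not taken from the source file.

CORE_ROUTER.add_method_handler(
    portal_transfer,
    "portal_transfer",
    MethodConfig(no_op=CallConfig.CALL),  # assumed to match the other CALL-only handlers
    "Receive a transfer via the Wormhole portal",  # hypothetical description
)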
9
2023-11-17 20:54:15+00:00
16k